#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

import base64
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import itertools
import io
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import ssl
import socket
import struct
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_basestring,
    compat_chr,
    compat_etree_fromstring,
    compat_html_entities,
    compat_http_client,
    compat_kwargs,
    compat_parse_qs,
    compat_socket_create_connection,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
    compat_urlparse,
    shlex_quote,
)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non-ASCII characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        if val:
            assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        # Here comes the crazy part: In 2.6, if the xpath is a unicode,
        # .//node does not match if a node is a direct child of . !
        if isinstance(xpath, compat_str):
            xpath = xpath.encode('ascii')

        for f in node.findall(xpath):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

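# Illustrative usage sketch (not part of the original module; element names
# are made up): given a parsed document containing
# <media url="http://example.com/v.mp4"/>,
# find_xpath_attr(doc, './/media', 'url') returns the first <media> element
# that carries a 'url' attribute at all, while passing val pins it to an
# exact attribute value.
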
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        if sys.version_info < (2, 7):  # Crazy 2.6
            xpath = xpath.encode('ascii')
        return node.find(xpath)

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute("id", id, html)


def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    m = re.search(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), re.escape(value)), html)

    if not m:
        return None
    res = m.group('content')

    if res.startswith('"') or res.startswith("'"):
        res = res[1:-1]

    return unescapeHTML(res)


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()

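# Example (hand-traced, for illustration):
#   clean_html('<p>Hello<br/>world &amp; friends</p>')
# collapses the <br/> into a newline, strips the remaining tags and
# unescapes '&amp;', yielding 'Hello\nworld & friends'.
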
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request( 'http:%s' % url if url.startswith('//') else url, *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass 
def _htmlentity_transform(entity):
    """Transforms an HTML entity to a character."""
    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs


def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg

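# Example (hand-traced, for illustration):
#   formatSeconds(3725)  ->  '1:02:05'
#   formatSeconds(65)    ->  '1:05'
#   formatSeconds(42)    ->  '42'
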
class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        self.msg = msg


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass

""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. 
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:
    http://techknack.net/python-urllib2-handlers/
    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        return self.do_open(functools.partial(
            _create_http_connection, self, compat_http_client.HTTPConnection, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters; however, this is
        # not always respected by websites: some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412]).
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around the aforementioned issue we replace the request's original URL with
        # a percent-encoded one.
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
            new_req = req_type(
                url_escaped, data=req.data, headers=req.headers,
                origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
            new_req.timeout = req.timeout
            req = new_req

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname
        return self.do_open(functools.partial(
            _create_http_connection, self, self._https_conn_class, True),
            req, **kwargs)


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        m = re.search(
            r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
            date_str)
        if not m:
            timezone = datetime.timedelta()
        else:
            date_str = date_str[:-len(m.group(0))]
            if not m.group('sign'):
                timezone = datetime.timedelta()
            else:
                sign = 1 if m.group('sign') == '+' else -1
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass

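# Example (hand-traced, for illustration):
#   parse_iso8601('2015-11-08T12:00:00+01:00')  ->  1446980400
# The '+01:00' offset is stripped, the remainder is parsed with
# '%Y-%m-%dT%H:%M:%S', and the one-hour delta is subtracted to get UTC.
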
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # %z (UTC offset) is only supported in python>=3.2
    if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
        date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    format_expressions = [
        '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y',
        '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p',
        '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d',
        '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f',
        '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M',
        '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z',
        '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M',
    ]
    if day_first:
        format_expressions.extend([
            '%d-%m-%Y', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S',
        ])
    else:
        format_expressions.extend([
            '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S',
        ])
    for expression in format_expressions:
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    if upload_date is not None:
        return compat_str(upload_date)


def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    elif guess.rstrip('/') in (
            'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
            'flv', 'f4v', 'f4a', 'f4b',
            'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
            'mkv', 'mka', 'mk3d',
            'avi', 'divx',
            'mov',
            'asf', 'wmv', 'wma',
            '3gp', '3g2',
            'mp3', 'flac', 'ape', 'wav',
            'f4f', 'f4m', 'm3u8', 'smil'):
        return guess.rstrip('/')
    else:
        return default_ext


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format

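# Example (hand-traced, for illustration):
#   unified_strdate('December 21, 2014')  ->  '20141221'
# (the comma is replaced by a space and '%B %d %Y' matches;
# strptime treats whitespace runs in the input flexibly)
#   determine_ext('http://example.com/video.mp4?dl=1')  ->  'mp4'
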
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, "%Y%m%d").date()


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res

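# Example (hand-traced, for illustration):
#   date_from_str('now-1week')  ->  today's date minus seven days
# 'month' and 'year' units are approximated as 30 and 365 days.
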
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b"GetStdHandle", ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b"GetConsoleMode", ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return struct_pack('%dB' % len(xs), *xs)

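# Example (hand-traced, for illustration):
#   bytes_to_intlist(b'abc')  ->  [97, 98, 99]  (on Python 2 and 3 alike)
#   intlist_to_bytes([97, 98, 99])  ->  b'abc'
# The two functions are inverses, used for byte-level crypto work.
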
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    import fcntl

    def _lock_file(f, exclusive):
        fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

    def _unlock_file(f):
        fcntl.flock(f, fcntl.LOCK_UN)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    sdata = compat_urllib_parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data


def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1, 'b': 1,
        'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000,
        'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2,
        'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3,
        'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4,
        'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5,
        'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6,
        'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7,
        'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8,
    }

    units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
    if not m:
        return None

    num_str = m.group('num').replace(',', '.')
    mult = _UNIT_TABLE[m.group('unit')]
    return int(float(num_str) * mult)

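# Example (hand-traced, for illustration):
#   url = smuggle_url('http://example.com/v', {'source': 'webpage'})
#   unsmuggle_url(url)  ->  ('http://example.com/v', {'source': 'webpage'})
# The payload rides in the URL fragment as a JSON-encoded query string.
# Similarly, parse_filesize('1.5GiB')  ->  1610612736  (= 1.5 * 1024 ** 3).
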
def month_by_name(name):
    """ Return the number of a month by (locale-independently) English name """

    try:
        return ENGLISH_MONTH_NAMES.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, compat_str)
    try:
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
    except OSError:
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    if s.startswith(start):
        return s[len(start):]
    return s


def remove_end(s, end):
    if s.endswith(end):
        return s[:-len(end)]
    return s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return "HEAD"


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except ValueError:
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except ValueError:
        return default

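# Example (hand-traced, for illustration):
#   str_to_int('1,234,567')  ->  1234567
#   int_or_none('90', invscale=1000)  ->  90000
#   int_or_none('', default=42)  ->  42  (empty strings count as missing)
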
def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    m = re.match(
        r'''(?ix)(?:P?T)?
        (?:
            (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|

            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
            (?:
                (?:
                    (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
                    (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*
                )?
                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
            )?
            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
        )$''', s)
    if not m:
        return None
    res = 0
    if m.group('only_mins'):
        return float_or_none(m.group('only_mins'), invscale=60)
    if m.group('only_hours'):
        return float_or_none(m.group('only_hours'), invscale=60 * 60)
    if m.group('secs'):
        res += int(m.group('secs'))
    if m.group('mins_reversed'):
        res += int(m.group('mins_reversed')) * 60
    if m.group('mins'):
        res += int(m.group('mins')) * 60
    if m.group('hours'):
        res += int(m.group('hours')) * 60 * 60
    if m.group('hours_reversed'):
        res += int(m.group('hours_reversed')) * 60 * 60
    if m.group('days'):
        res += int(m.group('days')) * 24 * 60 * 60
    if m.group('ms'):
        res += float(m.group('ms'))
    return res


def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized

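# Example (hand-traced, for illustration):
#   parse_duration('1:30:05')   ->  5405
#   parse_duration('2.5 mins')  ->  150.0
# '1:30:05' is matched as hours/minutes/seconds; '2.5 mins' takes the
# minutes-only branch and is scaled by 60 via float_or_none.
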
class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())


class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = list(self._pagefunc(pagenum))

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if the current page is not "full", i.e. does
            # not contain page_size videos, then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


try:
    struct.pack('!I', 0)
except TypeError:
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
else:
    struct_pack = struct.pack
    struct_unpack = struct.unpack


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')


def encode_dict(d, encoding='utf-8'):
    def encode(v):
        return v.encode(encoding) if isinstance(v, compat_basestring) else v
    return dict((encode(k), encode(v)) for k, v in d.items())


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


def parse_age_limit(s):
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)


def strip_jsonp(code):
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$',
        r'\1', code)

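# Example (hand-traced, for illustration):
#   parse_age_limit('18+')    ->  18
#   parse_age_limit('PG-13')  ->  13  (via the US_RATINGS table)
#   strip_jsonp('callback({"a": 1});')  ->  '{"a": 1}'
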
def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)
        return '"%s"' % v

    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
        ''', fix_kv, code)
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
    return res


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(shlex_quote(a) for a in args)


def mimetype2ext(mt):
    _, _, res = mt.rpartition('/')

    return {
        'x-ms-wmv': 'wmv',
        'x-mp4-fragmented': 'mp4',
        'ttml+xml': 'ttml',
    }.get(res, res)


def urlhandle_detect_ext(url_handle):
    try:
        url_handle.headers
        getheader = lambda h: url_handle.headers[h]
    except AttributeError:  # Python < 3
        getheader = url_handle.info().getheader

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit

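# Example (hand-traced, for illustration):
#   js_to_json("{foo: 'bar',}")  ->  '{"foo": "bar"}'
# The bare identifier and the single-quoted string are re-emitted with
# double quotes, and the trailing comma before '}' is dropped.
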
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
def match_filter_func(filter_str):
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return 0.0

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)


def dfxp2srt(dfxp_data):
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
    })

    def parse_node(node):
        str_or_empty = functools.partial(str_or_none, default='')

        out = str_or_empty(node.text)

        for child in node:
            if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                out += '\n' + str_or_empty(child.tail)
            elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
                out += str_or_empty(parse_node(child))
            else:
                out += str_or_empty(xml.etree.ElementTree.tostring(child))

        return out

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib['begin'])
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        if not end_time:
            end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)


def cli_option(params, command_option, param):
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(params, param, default=[]):
    ex_args = params.get(param)
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args

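# Example (hand-traced, for illustration):
#   parse_dfxp_time_expr('01:01:01.5')  ->  3661.5
#   srt_subtitles_timecode(3661.5)      ->  '01:01:01,500'
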
class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh',
        'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze',
        'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam',
        'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che',
        'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv',
        'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe',
        'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus',
        'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra',
        'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj',
        'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv',
        'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind',
        'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl',
        'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon',
        'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan',
        'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor',
        'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin',
        'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah',
        'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa',
        'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep',
        'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav',
        'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss',
        'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que',
        'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san',
        'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk',
        'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp',
        'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam',
        'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl',
        'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi',
        'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven',
        'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid',
        'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name



class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria',
        'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla',
        'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia',
        'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan',
        'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados',
        'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda',
        'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria',
        'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon',
        'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands',
        'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China',
        'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia',
        'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia',
        'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic',
        'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic',
        'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji',
        'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia',
        'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia',
        'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland',
        'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala',
        'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana',
        'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong',
        'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland',
        'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan',
        'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of',
        'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya',
        'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar',
        'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta',
        'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius',
        'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro',
        'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar',
        'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands',
        'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger',
        'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan',
        'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea',
        'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn',
        'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar',
        'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda',
        'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino',
        'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal',
        'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia',
        'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan',
        'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden',
        'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand',
        'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga',
        'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine',
        'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States',
        'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan',
        'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-1 alpha-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())
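

# Usage sketch (illustrative): full country name lookup from an alpha-2 code;
# the input is upper-cased before the lookup.
#
#     ISO3166Utils.short2full('DE')  # 'Germany'
#     ISO3166Utils.short2full('de')  # also 'Germany'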


class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
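

# A minimal sketch of the per-request proxy mechanism (the opener construction
# below is hypothetical, shown only for illustration): the handler honours a
# 'Ytdl-request-proxy' header on individual requests, with the special value
# '__noproxy__' disabling proxying for just that request.
#
#     opener = compat_urllib_request.build_opener(
#         PerRequestProxyHandler({'http': 'http://127.0.0.1:3128'}))
#     req = compat_urllib_request.Request('http://example.com/')
#     req.add_header('Ytdl-request-proxy', '__noproxy__')  # bypass default proxy
#     opener.open(req)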
British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type)
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-6-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-6-buggy/youtube-dl/youtube_dl/utils.py
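The _match_one/match_str helpers in the utils.py recorded above implement youtube-dl's --match-filter mini-language: comparison clauses joined by '&', where a '?' after the operator lets a clause pass when the key is absent from the info dict, and string operands only support '=' and '!=' (anything else raises ValueError inside _match_one). A minimal usage sketch, assuming the module is importable as youtube_dl.utils:

from youtube_dl.utils import match_str

# Numeric comparison plus a none-inclusive clause: 'dislike_count <? 50'
# also passes when dislike_count is missing from the dict entirely.
info = {'like_count': 190, 'dislike_count': 10}
assert match_str('like_count > 100 & dislike_count <? 50', info)

# Plain string equality; '>' or '<' on a string value would raise ValueError.
assert match_str('title = abc', {'title': 'abc'})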
youtube-dl-bug-16
from __future__ import unicode_literals import io import os import subprocess import time import re from .common import AudioConversionError, PostProcessor from ..compat import ( compat_subprocess_get_DEVNULL, ) from ..utils import ( encodeArgument, encodeFilename, get_exe_version, is_outdated_version, PostProcessingError, prepend_extension, shell_quote, subtitles_filename, dfxp2srt, ISO639Utils, replace_extension, ) EXT_TO_OUT_FORMATS = { 'aac': 'adts', 'flac': 'flac', 'm4a': 'ipod', 'mka': 'matroska', 'mkv': 'matroska', 'mpg': 'mpeg', 'ogv': 'ogg', 'ts': 'mpegts', 'wma': 'asf', 'wmv': 'asf', } ACODECS = { 'mp3': 'libmp3lame', 'aac': 'aac', 'flac': 'flac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None, } class FFmpegPostProcessorError(PostProcessingError): pass class FFmpegPostProcessor(PostProcessor): def __init__(self, downloader=None): PostProcessor.__init__(self, downloader) self._determine_executables() def check_version(self): if not self.available: raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.') required_version = '10-0' if self.basename == 'avconv' else '1.0' if is_outdated_version( self._versions[self.basename], required_version): warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % ( self.basename, self.basename, required_version) if self._downloader: self._downloader.report_warning(warning) @staticmethod def get_versions(downloader=None): return FFmpegPostProcessor(downloader)._versions def _determine_executables(self): programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe'] prefer_ffmpeg = False self.basename = None self.probe_basename = None self._paths = None self._versions = None if self._downloader: prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False) location = self._downloader.params.get('ffmpeg_location') if location is not None: if not os.path.exists(location): self._downloader.report_warning( 'ffmpeg-location %s does not exist! ' 'Continuing without avconv/ffmpeg.' % (location)) self._versions = {} return elif not os.path.isdir(location): basename = os.path.splitext(os.path.basename(location))[0] if basename not in programs: self._downloader.report_warning( 'Cannot identify executable %s, its basename should be one of %s. ' 'Continuing without avconv/ffmpeg.' % (location, ', '.join(programs))) self._versions = {} return None location = os.path.dirname(os.path.abspath(location)) if basename in ('ffmpeg', 'ffprobe'): prefer_ffmpeg = True self._paths = dict( (p, os.path.join(location, p)) for p in programs) self._versions = dict( (p, get_exe_version(self._paths[p], args=['-version'])) for p in programs) if self._versions is None: self._versions = dict( (p, get_exe_version(p, args=['-version'])) for p in programs) self._paths = dict((p, p) for p in programs) if prefer_ffmpeg: prefs = ('ffmpeg', 'avconv') else: prefs = ('avconv', 'ffmpeg') for p in prefs: if self._versions[p]: self.basename = p break if prefer_ffmpeg: prefs = ('ffprobe', 'avprobe') else: prefs = ('avprobe', 'ffprobe') for p in prefs: if self._versions[p]: self.probe_basename = p break @property def available(self): return self.basename is not None @property def executable(self): return self._paths[self.basename] @property def probe_available(self): return self.probe_basename is not None @property def probe_executable(self): return self._paths[self.probe_basename] def get_audio_codec(self, path): if not self.probe_available: raise PostProcessingError('ffprobe or avprobe not found. 
Please install one.') try: cmd = [ encodeFilename(self.probe_executable, True), encodeArgument('-show_streams'), encodeFilename(self._ffmpeg_filename_argument(path), True)] if self._downloader.params.get('verbose', False): self._downloader.to_screen('[debug] %s command line: %s' % (self.basename, shell_quote(cmd))) handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE, stdin=subprocess.PIPE) output = handle.communicate()[0] if handle.wait() != 0: return None except (IOError, OSError): return None audio_codec = None for line in output.decode('ascii', 'ignore').split('\n'): if line.startswith('codec_name='): audio_codec = line.split('=')[1].strip() elif line.strip() == 'codec_type=audio' and audio_codec is not None: return audio_codec return None def run_ffmpeg_multiple_files(self, input_paths, out_path, opts): self.check_version() oldest_mtime = min( os.stat(encodeFilename(path)).st_mtime for path in input_paths) opts += self._configuration_args() files_cmd = [] for path in input_paths: files_cmd.extend([ encodeArgument('-i'), encodeFilename(self._ffmpeg_filename_argument(path), True) ]) cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] + files_cmd + [encodeArgument(o) for o in opts] + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)]) if self._downloader.params.get('verbose', False): self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd)) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: stderr = stderr.decode('utf-8', 'replace') msg = stderr.strip().split('\n')[-1] raise FFmpegPostProcessorError(msg) self.try_utime(out_path, oldest_mtime, oldest_mtime) def run_ffmpeg(self, path, out_path, opts): self.run_ffmpeg_multiple_files([path], out_path, opts) def _ffmpeg_filename_argument(self, fn): # Always use 'file:' because the filename may contain ':' (ffmpeg # interprets that as a protocol) or can start with '-' (-- is broken in # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details) # Also leave '-' intact in order not to break streaming to stdout. 
return 'file:' + fn if fn != '-' else fn class FFmpegExtractAudioPP(FFmpegPostProcessor): def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False): FFmpegPostProcessor.__init__(self, downloader) if preferredcodec is None: preferredcodec = 'best' self._preferredcodec = preferredcodec self._preferredquality = preferredquality self._nopostoverwrites = nopostoverwrites def run_ffmpeg(self, path, out_path, codec, more_opts): if codec is None: acodec_opts = [] else: acodec_opts = ['-acodec', codec] opts = ['-vn'] + acodec_opts + more_opts try: FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts) except FFmpegPostProcessorError as err: raise AudioConversionError(err.msg) def run(self, information): path = information['filepath'] filecodec = self.get_audio_codec(path) if filecodec is None: raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe') more_opts = [] if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'): if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']: # Lossless, but in another container acodec = 'copy' extension = 'm4a' more_opts = ['-bsf:a', 'aac_adtstoasc'] elif filecodec in ['aac', 'flac', 'mp3', 'vorbis', 'opus']: # Lossless if possible acodec = 'copy' extension = filecodec if filecodec == 'aac': more_opts = ['-f', 'adts'] if filecodec == 'vorbis': extension = 'ogg' else: # MP3 otherwise. acodec = 'libmp3lame' extension = 'mp3' more_opts = [] if self._preferredquality is not None: if int(self._preferredquality) < 10: more_opts += ['-q:a', self._preferredquality] else: more_opts += ['-b:a', self._preferredquality + 'k'] else: # We convert the audio (lossy if codec is lossy) acodec = ACODECS[self._preferredcodec] extension = self._preferredcodec more_opts = [] if self._preferredquality is not None: # The opus codec doesn't support the -aq option if int(self._preferredquality) < 10 and extension != 'opus': more_opts += ['-q:a', self._preferredquality] else: more_opts += ['-b:a', self._preferredquality + 'k'] if self._preferredcodec == 'aac': more_opts += ['-f', 'adts'] if self._preferredcodec == 'm4a': more_opts += ['-bsf:a', 'aac_adtstoasc'] if self._preferredcodec == 'vorbis': extension = 'ogg' if self._preferredcodec == 'wav': extension = 'wav' more_opts += ['-f', 'wav'] prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups new_path = prefix + sep + extension information['filepath'] = new_path information['ext'] = extension # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly. if (new_path == path or (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))): self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path) return [], information try: self._downloader.to_screen('[ffmpeg] Destination: ' + new_path) self.run_ffmpeg(path, new_path, acodec, more_opts) except AudioConversionError as e: raise PostProcessingError( 'audio conversion failed: ' + e.msg) except Exception: raise PostProcessingError('error running ' + self.basename) # Try to update the date time for extracted audio file. 
if information.get('filetime') is not None: self.try_utime( new_path, time.time(), information['filetime'], errnote='Cannot update utime of audio file') return [path], information class FFmpegVideoConvertorPP(FFmpegPostProcessor): def __init__(self, downloader=None, preferedformat=None): super(FFmpegVideoConvertorPP, self).__init__(downloader) self._preferedformat = preferedformat def run(self, information): path = information['filepath'] if information['ext'] == self._preferedformat: self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat)) return [], information options = [] if self._preferedformat == 'avi': options.extend(['-c:v', 'libxvid', '-vtag', 'XVID']) prefix, sep, ext = path.rpartition('.') outpath = prefix + sep + self._preferedformat self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath) self.run_ffmpeg(path, outpath, options) information['filepath'] = outpath information['format'] = self._preferedformat information['ext'] = self._preferedformat return [path], information class FFmpegEmbedSubtitlePP(FFmpegPostProcessor): def run(self, information): if information['ext'] not in ('mp4', 'webm', 'mkv'): self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files') return [], information subtitles = information.get('requested_subtitles') if not subtitles: self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed') return [], information filename = information['filepath'] ext = information['ext'] sub_langs = [] sub_filenames = [] webm_vtt_warn = False for lang, sub_info in subtitles.items(): sub_ext = sub_info['ext'] if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt': sub_langs.append(lang) sub_filenames.append(subtitles_filename(filename, lang, sub_ext)) else: if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt': webm_vtt_warn = True self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files') if not sub_langs: return [], information input_files = [filename] + sub_filenames opts = [ '-map', '0', '-c', 'copy', # Don't copy the existing subtitles, we may be running the # postprocessor a second time '-map', '-0:s', ] if information['ext'] == 'mp4': opts += ['-c:s', 'mov_text'] for (i, lang) in enumerate(sub_langs): opts.extend(['-map', '%d:0' % (i + 1)]) lang_code = ISO639Utils.short2long(lang) if lang_code is not None: opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code]) temp_filename = prepend_extension(filename, 'temp') self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename) self.run_ffmpeg_multiple_files(input_files, temp_filename, opts) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return sub_filenames, information class FFmpegMetadataPP(FFmpegPostProcessor): def run(self, info): metadata = {} def add(meta_list, info_list=None): if not info_list: info_list = meta_list if not isinstance(meta_list, (list, tuple)): meta_list = (meta_list,) if not isinstance(info_list, (list, tuple)): info_list = (info_list,) for info_f in info_list: if info.get(info_f) is not None: for meta_f in meta_list: metadata[meta_f] = info[info_f] break add('title', ('track', 'title')) add('date', 'upload_date') add(('description', 'comment'), 'description') add('purl', 'webpage_url') add('track', 'track_number') add('artist', ('artist', 'creator', 'uploader', 
'uploader_id')) add('genre') add('album') add('album_artist') add('disc', 'disc_number') if not metadata: self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add') return [], info filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') in_filenames = [filename] options = [] if info['ext'] == 'm4a': options.extend(['-vn', '-acodec', 'copy']) else: options.extend(['-c', 'copy']) for (name, value) in metadata.items(): options.extend(['-metadata', '%s=%s' % (name, value)]) chapters = info.get('chapters', []) if chapters: metadata_filename = replace_extension(filename, 'meta') with io.open(metadata_filename, 'wt', encoding='utf-8') as f: def ffmpeg_escape(text): return re.sub(r'(=|;|#|\\|\n)', r'\\\1', text) metadata_file_content = ';FFMETADATA1\n' for chapter in chapters: metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n' metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000) metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000) chapter_title = chapter.get('title') if chapter_title: metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title) f.write(metadata_file_content) in_filenames.append(metadata_filename) options.extend(['-map_metadata', '1']) self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename) self.run_ffmpeg_multiple_files(in_filenames, temp_filename, options) if chapters: os.remove(metadata_filename) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegMergerPP(FFmpegPostProcessor): def run(self, info): filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0'] self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename) self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return info['__files_to_merge'], info def can_merge(self): # TODO: figure out merge-capable ffmpeg version if self.basename != 'avconv': return True required_version = '10-0' if is_outdated_version( self._versions[self.basename], required_version): warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, ' 'youtube-dl will download single file media. 
' 'Update %s to version %s or newer to fix this.') % ( self.basename, self.basename, required_version) if self._downloader: self._downloader.report_warning(warning) return False return True class FFmpegFixupStretchedPP(FFmpegPostProcessor): def run(self, info): stretched_ratio = info.get('stretched_ratio') if stretched_ratio is None or stretched_ratio == 1: return [], info filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio] self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename) self.run_ffmpeg(filename, temp_filename, options) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegFixupM4aPP(FFmpegPostProcessor): def run(self, info): if info.get('container') != 'm4a_dash': return [], info filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') options = ['-c', 'copy', '-f', 'mp4'] self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename) self.run_ffmpeg(filename, temp_filename, options) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegFixupM3u8PP(FFmpegPostProcessor): def run(self, info): filename = info['filepath'] if self.get_audio_codec(filename) == 'aac': temp_filename = prepend_extension(filename, 'temp') options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc'] self._downloader.to_screen('[ffmpeg] Fixing malformed AAC bitstream in "%s"' % filename) self.run_ffmpeg(filename, temp_filename, options) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor): def __init__(self, downloader=None, format=None): super(FFmpegSubtitlesConvertorPP, self).__init__(downloader) self.format = format def run(self, info): subs = info.get('requested_subtitles') filename = info['filepath'] new_ext = self.format new_format = new_ext if new_format == 'vtt': new_format = 'webvtt' if subs is None: self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert') return [], info self._downloader.to_screen('[ffmpeg] Converting subtitles') sub_filenames = [] for lang, sub in subs.items(): ext = sub['ext'] if ext == new_ext: self._downloader.to_screen( '[ffmpeg] Subtitle file for %s is already in the requested format' % new_ext) continue old_file = subtitles_filename(filename, lang, ext) sub_filenames.append(old_file) new_file = subtitles_filename(filename, lang, new_ext) if ext in ('dfxp', 'ttml', 'tt'): self._downloader.report_warning( 'You have requested to convert dfxp (TTML) subtitles into another format, ' 'which results in style information loss') dfxp_file = old_file srt_file = subtitles_filename(filename, lang, 'srt') with io.open(dfxp_file, 'rt', encoding='utf-8') as f: srt_data = dfxp2srt(f.read()) with io.open(srt_file, 'wt', encoding='utf-8') as f: f.write(srt_data) old_file = srt_file subs[lang] = { 'ext': 'srt', 'data': srt_data } if new_ext == 'srt': continue else: sub_filenames.append(srt_file) self.run_ffmpeg(old_file, new_file, ['-f', new_format]) with io.open(new_file, 'rt', encoding='utf-8') as f: subs[lang] = { 'ext': new_ext, 'data': f.read(), } return sub_filenames, info from __future__ import unicode_literals import io import os import subprocess import time import re from .common import AudioConversionError, PostProcessor from ..compat import ( 
compat_subprocess_get_DEVNULL, ) from ..utils import ( encodeArgument, encodeFilename, get_exe_version, is_outdated_version, PostProcessingError, prepend_extension, shell_quote, subtitles_filename, dfxp2srt, ISO639Utils, replace_extension, ) EXT_TO_OUT_FORMATS = { 'aac': 'adts', 'flac': 'flac', 'm4a': 'ipod', 'mka': 'matroska', 'mkv': 'matroska', 'mpg': 'mpeg', 'ogv': 'ogg', 'ts': 'mpegts', 'wma': 'asf', 'wmv': 'asf', } ACODECS = { 'mp3': 'libmp3lame', 'aac': 'aac', 'flac': 'flac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None, } class FFmpegPostProcessorError(PostProcessingError): pass class FFmpegPostProcessor(PostProcessor): def __init__(self, downloader=None): PostProcessor.__init__(self, downloader) self._determine_executables() def check_version(self): if not self.available: raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.') required_version = '10-0' if self.basename == 'avconv' else '1.0' if is_outdated_version( self._versions[self.basename], required_version): warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % ( self.basename, self.basename, required_version) if self._downloader: self._downloader.report_warning(warning) @staticmethod def get_versions(downloader=None): return FFmpegPostProcessor(downloader)._versions def _determine_executables(self): programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe'] prefer_ffmpeg = False self.basename = None self.probe_basename = None self._paths = None self._versions = None if self._downloader: prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False) location = self._downloader.params.get('ffmpeg_location') if location is not None: if not os.path.exists(location): self._downloader.report_warning( 'ffmpeg-location %s does not exist! ' 'Continuing without avconv/ffmpeg.' % (location)) self._versions = {} return elif not os.path.isdir(location): basename = os.path.splitext(os.path.basename(location))[0] if basename not in programs: self._downloader.report_warning( 'Cannot identify executable %s, its basename should be one of %s. ' 'Continuing without avconv/ffmpeg.' % (location, ', '.join(programs))) self._versions = {} return None location = os.path.dirname(os.path.abspath(location)) if basename in ('ffmpeg', 'ffprobe'): prefer_ffmpeg = True self._paths = dict( (p, os.path.join(location, p)) for p in programs) self._versions = dict( (p, get_exe_version(self._paths[p], args=['-version'])) for p in programs) if self._versions is None: self._versions = dict( (p, get_exe_version(p, args=['-version'])) for p in programs) self._paths = dict((p, p) for p in programs) if prefer_ffmpeg: prefs = ('ffmpeg', 'avconv') else: prefs = ('avconv', 'ffmpeg') for p in prefs: if self._versions[p]: self.basename = p break if prefer_ffmpeg: prefs = ('ffprobe', 'avprobe') else: prefs = ('avprobe', 'ffprobe') for p in prefs: if self._versions[p]: self.probe_basename = p break @property def available(self): return self.basename is not None @property def executable(self): return self._paths[self.basename] @property def probe_available(self): return self.probe_basename is not None @property def probe_executable(self): return self._paths[self.probe_basename] def get_audio_codec(self, path): if not self.probe_available: raise PostProcessingError('ffprobe or avprobe not found. 
Please install one.') try: cmd = [ encodeFilename(self.probe_executable, True), encodeArgument('-show_streams'), encodeFilename(self._ffmpeg_filename_argument(path), True)] if self._downloader.params.get('verbose', False): self._downloader.to_screen('[debug] %s command line: %s' % (self.basename, shell_quote(cmd))) handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE, stdin=subprocess.PIPE) output = handle.communicate()[0] if handle.wait() != 0: return None except (IOError, OSError): return None audio_codec = None for line in output.decode('ascii', 'ignore').split('\n'): if line.startswith('codec_name='): audio_codec = line.split('=')[1].strip() elif line.strip() == 'codec_type=audio' and audio_codec is not None: return audio_codec return None def run_ffmpeg_multiple_files(self, input_paths, out_path, opts): self.check_version() oldest_mtime = min( os.stat(encodeFilename(path)).st_mtime for path in input_paths) opts += self._configuration_args() files_cmd = [] for path in input_paths: files_cmd.extend([ encodeArgument('-i'), encodeFilename(self._ffmpeg_filename_argument(path), True) ]) cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] + files_cmd + [encodeArgument(o) for o in opts] + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)]) if self._downloader.params.get('verbose', False): self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd)) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: stderr = stderr.decode('utf-8', 'replace') msg = stderr.strip().split('\n')[-1] raise FFmpegPostProcessorError(msg) self.try_utime(out_path, oldest_mtime, oldest_mtime) def run_ffmpeg(self, path, out_path, opts): self.run_ffmpeg_multiple_files([path], out_path, opts) def _ffmpeg_filename_argument(self, fn): # Always use 'file:' because the filename may contain ':' (ffmpeg # interprets that as a protocol) or can start with '-' (-- is broken in # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details) # Also leave '-' intact in order not to break streaming to stdout. 
return 'file:' + fn if fn != '-' else fn class FFmpegExtractAudioPP(FFmpegPostProcessor): def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False): FFmpegPostProcessor.__init__(self, downloader) if preferredcodec is None: preferredcodec = 'best' self._preferredcodec = preferredcodec self._preferredquality = preferredquality self._nopostoverwrites = nopostoverwrites def run_ffmpeg(self, path, out_path, codec, more_opts): if codec is None: acodec_opts = [] else: acodec_opts = ['-acodec', codec] opts = ['-vn'] + acodec_opts + more_opts try: FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts) except FFmpegPostProcessorError as err: raise AudioConversionError(err.msg) def run(self, information): path = information['filepath'] filecodec = self.get_audio_codec(path) if filecodec is None: raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe') more_opts = [] if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'): if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']: # Lossless, but in another container acodec = 'copy' extension = 'm4a' more_opts = ['-bsf:a', 'aac_adtstoasc'] elif filecodec in ['aac', 'flac', 'mp3', 'vorbis', 'opus']: # Lossless if possible acodec = 'copy' extension = filecodec if filecodec == 'aac': more_opts = ['-f', 'adts'] if filecodec == 'vorbis': extension = 'ogg' else: # MP3 otherwise. acodec = 'libmp3lame' extension = 'mp3' more_opts = [] if self._preferredquality is not None: if int(self._preferredquality) < 10: more_opts += ['-q:a', self._preferredquality] else: more_opts += ['-b:a', self._preferredquality + 'k'] else: # We convert the audio (lossy if codec is lossy) acodec = ACODECS[self._preferredcodec] extension = self._preferredcodec more_opts = [] if self._preferredquality is not None: # The opus codec doesn't support the -aq option if int(self._preferredquality) < 10 and extension != 'opus': more_opts += ['-q:a', self._preferredquality] else: more_opts += ['-b:a', self._preferredquality + 'k'] if self._preferredcodec == 'aac': more_opts += ['-f', 'adts'] if self._preferredcodec == 'm4a': more_opts += ['-bsf:a', 'aac_adtstoasc'] if self._preferredcodec == 'vorbis': extension = 'ogg' if self._preferredcodec == 'wav': extension = 'wav' more_opts += ['-f', 'wav'] prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups new_path = prefix + sep + extension information['filepath'] = new_path information['ext'] = extension # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly. if (new_path == path or (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))): self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path) return [], information try: self._downloader.to_screen('[ffmpeg] Destination: ' + new_path) self.run_ffmpeg(path, new_path, acodec, more_opts) except AudioConversionError as e: raise PostProcessingError( 'audio conversion failed: ' + e.msg) except Exception: raise PostProcessingError('error running ' + self.basename) # Try to update the date time for extracted audio file. 
if information.get('filetime') is not None: self.try_utime( new_path, time.time(), information['filetime'], errnote='Cannot update utime of audio file') return [path], information class FFmpegVideoConvertorPP(FFmpegPostProcessor): def __init__(self, downloader=None, preferedformat=None): super(FFmpegVideoConvertorPP, self).__init__(downloader) self._preferedformat = preferedformat def run(self, information): path = information['filepath'] if information['ext'] == self._preferedformat: self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat)) return [], information options = [] if self._preferedformat == 'avi': options.extend(['-c:v', 'libxvid', '-vtag', 'XVID']) prefix, sep, ext = path.rpartition('.') outpath = prefix + sep + self._preferedformat self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath) self.run_ffmpeg(path, outpath, options) information['filepath'] = outpath information['format'] = self._preferedformat information['ext'] = self._preferedformat return [path], information class FFmpegEmbedSubtitlePP(FFmpegPostProcessor): def run(self, information): if information['ext'] not in ('mp4', 'webm', 'mkv'): self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files') return [], information subtitles = information.get('requested_subtitles') if not subtitles: self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed') return [], information filename = information['filepath'] ext = information['ext'] sub_langs = [] sub_filenames = [] webm_vtt_warn = False for lang, sub_info in subtitles.items(): sub_ext = sub_info['ext'] if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt': sub_langs.append(lang) sub_filenames.append(subtitles_filename(filename, lang, sub_ext)) else: if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt': webm_vtt_warn = True self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files') if not sub_langs: return [], information input_files = [filename] + sub_filenames opts = [ '-map', '0', '-c', 'copy', # Don't copy the existing subtitles, we may be running the # postprocessor a second time '-map', '-0:s', ] if information['ext'] == 'mp4': opts += ['-c:s', 'mov_text'] for (i, lang) in enumerate(sub_langs): opts.extend(['-map', '%d:0' % (i + 1)]) lang_code = ISO639Utils.short2long(lang) if lang_code is not None: opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code]) temp_filename = prepend_extension(filename, 'temp') self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename) self.run_ffmpeg_multiple_files(input_files, temp_filename, opts) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return sub_filenames, information class FFmpegMetadataPP(FFmpegPostProcessor): def run(self, info): metadata = {} def add(meta_list, info_list=None): if not info_list: info_list = meta_list if not isinstance(meta_list, (list, tuple)): meta_list = (meta_list,) if not isinstance(info_list, (list, tuple)): info_list = (info_list,) for info_f in info_list: if info.get(info_f) is not None: for meta_f in meta_list: metadata[meta_f] = info[info_f] break add('title', ('track', 'title')) add('date', 'upload_date') add(('description', 'comment'), 'description') add('purl', 'webpage_url') add('track', 'track_number') add('artist', ('artist', 'creator', 'uploader', 
'uploader_id')) add('genre') add('album') add('album_artist') add('disc', 'disc_number') if not metadata: self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add') return [], info filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') in_filenames = [filename] options = [] if info['ext'] == 'm4a': options.extend(['-vn', '-acodec', 'copy']) else: options.extend(['-c', 'copy']) for (name, value) in metadata.items(): options.extend(['-metadata', '%s=%s' % (name, value)]) chapters = info.get('chapters', []) if chapters: metadata_filename = replace_extension(filename, 'meta') with io.open(metadata_filename, 'wt', encoding='utf-8') as f: def ffmpeg_escape(text): return re.sub(r'(=|;|#|\\|\n)', r'\\\1', text) metadata_file_content = ';FFMETADATA1\n' for chapter in chapters: metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n' metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000) metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000) chapter_title = chapter.get('title') if chapter_title: metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title) f.write(metadata_file_content) in_filenames.append(metadata_filename) options.extend(['-map_metadata', '1']) self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename) self.run_ffmpeg_multiple_files(in_filenames, temp_filename, options) if chapters: os.remove(metadata_filename) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegMergerPP(FFmpegPostProcessor): def run(self, info): filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0'] self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename) self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return info['__files_to_merge'], info def can_merge(self): # TODO: figure out merge-capable ffmpeg version if self.basename != 'avconv': return True required_version = '10-0' if is_outdated_version( self._versions[self.basename], required_version): warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, ' 'youtube-dl will download single file media. 
' 'Update %s to version %s or newer to fix this.') % ( self.basename, self.basename, required_version) if self._downloader: self._downloader.report_warning(warning) return False return True class FFmpegFixupStretchedPP(FFmpegPostProcessor): def run(self, info): stretched_ratio = info.get('stretched_ratio') if stretched_ratio is None or stretched_ratio == 1: return [], info filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio] self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename) self.run_ffmpeg(filename, temp_filename, options) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegFixupM4aPP(FFmpegPostProcessor): def run(self, info): if info.get('container') != 'm4a_dash': return [], info filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') options = ['-c', 'copy', '-f', 'mp4'] self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename) self.run_ffmpeg(filename, temp_filename, options) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegFixupM3u8PP(FFmpegPostProcessor): def run(self, info): filename = info['filepath'] if self.get_audio_codec(filename) == 'aac': temp_filename = prepend_extension(filename, 'temp') options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc'] self._downloader.to_screen('[ffmpeg] Fixing malformed AAC bitstream in "%s"' % filename) self.run_ffmpeg(filename, temp_filename, options) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return [], info class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor): def __init__(self, downloader=None, format=None): super(FFmpegSubtitlesConvertorPP, self).__init__(downloader) self.format = format def run(self, info): subs = info.get('requested_subtitles') filename = info['filepath'] new_ext = self.format new_format = new_ext if new_format == 'vtt': new_format = 'webvtt' if subs is None: self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert') return [], info self._downloader.to_screen('[ffmpeg] Converting subtitles') sub_filenames = [] for lang, sub in subs.items(): ext = sub['ext'] if ext == new_ext: self._downloader.to_screen( '[ffmpeg] Subtitle file for %s is already in the requested format' % new_ext) continue old_file = subtitles_filename(filename, lang, ext) sub_filenames.append(old_file) new_file = subtitles_filename(filename, lang, new_ext) if ext in ('dfxp', 'ttml', 'tt'): self._downloader.report_warning( 'You have requested to convert dfxp (TTML) subtitles into another format, ' 'which results in style information loss') dfxp_file = old_file srt_file = subtitles_filename(filename, lang, 'srt') with open(dfxp_file, 'rb') as f: srt_data = dfxp2srt(f.read()) with io.open(srt_file, 'wt', encoding='utf-8') as f: f.write(srt_data) old_file = srt_file subs[lang] = { 'ext': 'srt', 'data': srt_data } if new_ext == 'srt': continue else: sub_filenames.append(srt_file) self.run_ffmpeg(old_file, new_file, ['-f', new_format]) with io.open(new_file, 'rt', encoding='utf-8') as f: subs[lang] = { 'ext': new_ext, 'data': f.read(), } return sub_filenames, info
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-16-fixed/youtube-dl/youtube_dl/postprocessor/ffmpeg.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-16-buggy/youtube-dl/youtube_dl/postprocessor/ffmpeg.py
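The two copies of ffmpeg.py above differ in how FFmpegSubtitlesConvertorPP reads the dfxp file before handing it to dfxp2srt (decoded utf-8 text in the first copy, raw bytes in the second); the command plumbing around it is the same in both. As a rough standalone sketch of the command shape run_ffmpeg_multiple_files builds — the helper name and the sample invocation below are illustrative, not part of the record:

import subprocess

def run_ffmpeg(input_paths, out_path, opts, executable='ffmpeg'):
    # Same shape as above: -y, one -i per input, caller options, output path.
    # 'file:' is prepended so ffmpeg does not read a leading '-' as an option
    # or an embedded ':' as a protocol; bare '-' is kept for stdout streaming.
    def arg(fn):
        return 'file:' + fn if fn != '-' else fn
    cmd = [executable, '-y']
    for path in input_paths:
        cmd += ['-i', arg(path)]
    cmd += list(opts) + [arg(out_path)]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, stderr = p.communicate()
    if p.returncode != 0:
        # Surface only the last stderr line, as the post-processor does.
        raise RuntimeError(stderr.decode('utf-8', 'replace').strip().split('\n')[-1])

# e.g. run_ffmpeg(['in.mkv'], 'out.mp4', ['-c', 'copy'])  # needs ffmpeg on PATH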
youtube-dl-bug-39
from __future__ import unicode_literals import json import re import socket from .common import InfoExtractor from ..utils import ( compat_http_client, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_request, urlencode_postdata, ExtractorError, ) class FacebookIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:\w+\.)?facebook\.com/ (?:[^#]*?\#!/)? (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?) (?:v|video_id)=(?P<id>[0-9]+) (?:.*)''' _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _TESTS = [{ 'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf', 'md5': '6a40d33c0eccbb1af76cf0485a052659', 'info_dict': { 'id': '637842556329505', 'ext': 'mp4', 'duration': 38, 'title': 'Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam fin...', } }, { 'url': 'https://www.facebook.com/video.php?v=10204634152394104', 'only_matching': True, }] def _login(self): (useremail, password) = self._get_login_info() if useremail is None: return login_page_req = compat_urllib_request.Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') login_page = self._download_webpage(login_page_req, None, note='Downloading login page', errnote='Unable to download login page') lsd = self._search_regex( r'<input type="hidden" name="lsd" value="([^"]*)"', login_page, 'lsd') lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') login_form = { 'email': useremail, 'pass': password, 'lsd': lsd, 'lgnrnd': lgnrnd, 'next': 'http://facebook.com/home.php', 'default_persistent': '0', 'legacy_return': '1', 'timezone': '-60', 'trynum': '1', } request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = self._download_webpage(request, None, note='Logging in', errnote='unable to fetch login page') if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.') return check_form = { 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'), 'h': self._search_regex( r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'), 'name_action_selected': 'dont_save', } check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to log in in your browser and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self._downloader.report_warning('unable to log in: %s' % compat_str(err)) return def _real_initialize(self): self._login() def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') url = 'https://www.facebook.com/video/video.php?v=%s' % video_id webpage = self._download_webpage(url, video_id) BEFORE = '{swf.addParam(param[0], param[1]);});\n' AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});' m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage) if not m: m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) if m_msg is not None: raise ExtractorError( 'The video is not available, Facebook said: "%s"' % m_msg.group(1), expected=True) else: raise ExtractorError('Cannot parse data') data = dict(json.loads(m.group(1))) params_raw = compat_urllib_parse.unquote(data['params']) params = json.loads(params_raw) video_data = params['video_data'][0] video_url = video_data.get('hd_src') if not video_url: video_url = video_data['sd_src'] if not video_url: raise ExtractorError('Cannot find video URL') video_title = self._html_search_regex( r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title', fatal=False) if not video_title: video_title = self._html_search_regex( r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>', webpage, 'alternative title', default=None) if len(video_title) > 80 + 3: video_title = video_title[:80] + '...' if not video_title: video_title = 'Facebook video #%s' % video_id return { 'id': video_id, 'title': video_title, 'url': video_url, 'duration': int(video_data['video_duration']), 'thumbnail': video_data['thumbnail_src'], } from __future__ import unicode_literals import json import re import socket from .common import InfoExtractor from ..utils import ( compat_http_client, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_request, urlencode_postdata, ExtractorError, limit_length, ) class FacebookIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:\w+\.)?facebook\.com/ (?:[^#]*?\#!/)? (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
(?:v|video_id)=(?P<id>[0-9]+) (?:.*)''' _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _TESTS = [{ 'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf', 'md5': '6a40d33c0eccbb1af76cf0485a052659', 'info_dict': { 'id': '637842556329505', 'ext': 'mp4', 'duration': 38, 'title': 'Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam fin...', } }, { 'note': 'Video without discernible title', 'url': 'https://www.facebook.com/video.php?v=274175099429670', 'info_dict': { 'id': '274175099429670', 'ext': 'mp4', 'title': 'Facebook video #274175099429670', } }, { 'url': 'https://www.facebook.com/video.php?v=10204634152394104', 'only_matching': True, }] def _login(self): (useremail, password) = self._get_login_info() if useremail is None: return login_page_req = compat_urllib_request.Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') login_page = self._download_webpage(login_page_req, None, note='Downloading login page', errnote='Unable to download login page') lsd = self._search_regex( r'<input type="hidden" name="lsd" value="([^"]*)"', login_page, 'lsd') lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') login_form = { 'email': useremail, 'pass': password, 'lsd': lsd, 'lgnrnd': lgnrnd, 'next': 'http://facebook.com/home.php', 'default_persistent': '0', 'legacy_return': '1', 'timezone': '-60', 'trynum': '1', } request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = self._download_webpage(request, None, note='Logging in', errnote='unable to fetch login page') if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.') return check_form = { 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'), 'h': self._search_regex( r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'), 'name_action_selected': 'dont_save', } check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to log in in your browser and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self._downloader.report_warning('unable to log in: %s' % compat_str(err)) return def _real_initialize(self): self._login() def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') url = 'https://www.facebook.com/video/video.php?v=%s' % video_id webpage = self._download_webpage(url, video_id) BEFORE = '{swf.addParam(param[0], param[1]);});\n' AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});' m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage) if not m: m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) if m_msg is not None: raise ExtractorError( 'The video is not available, Facebook said: "%s"' % m_msg.group(1), expected=True) else: raise ExtractorError('Cannot parse data') data = dict(json.loads(m.group(1))) params_raw = compat_urllib_parse.unquote(data['params']) params = json.loads(params_raw) video_data = params['video_data'][0] video_url = video_data.get('hd_src') if not video_url: video_url = video_data['sd_src'] if not video_url: raise ExtractorError('Cannot find video URL') video_title = self._html_search_regex( r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title', fatal=False) if not video_title: video_title = self._html_search_regex( r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>', webpage, 'alternative title', default=None) video_title = limit_length(video_title, 80) if not video_title: video_title = 'Facebook video #%s' % video_id return { 'id': video_id, 'title': video_title, 'url': video_url, 'duration': int(video_data['video_duration']), 'thumbnail': video_data['thumbnail_src'], }
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-39-fixed/youtube-dl/youtube_dl/extractor/facebook.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-39-buggy/youtube-dl/youtube_dl/extractor/facebook.py
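The facebook.py pair above turns on title truncation: the buggy copy evaluates len(video_title) > 80 + 3 even when the alternative-title search returned its default of None, so a video with no discernible title crashes before reaching the 'Facebook video #%s' fallback; the fixed copy routes through utils.limit_length and adds a test named 'Video without discernible title' for exactly that case. A sketch of a helper consistent with the call limit_length(video_title, 80) — the exact ellipsis arithmetic of the real utils function is assumed here; the None pass-through is the essential part:

def limit_length(s, length):
    # Ellipsize overly long strings, and crucially pass None through
    # unchanged so the 'if not video_title' fallback still applies.
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s

assert limit_length(None, 80) is None
assert limit_length('abc', 80) == 'abc'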
youtube-dl-bug-42
import re from .common import InfoExtractor from ..utils import ( find_xpath_attr, fix_xml_all_ampersand, ) class ClipsyndicateIE(InfoExtractor): _VALID_URL = r'http://www\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)' _TEST = { u'url': u'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe', u'md5': u'4d7d549451bad625e0ff3d7bd56d776c', u'info_dict': { u'id': u'4629301', u'ext': u'mp4', u'title': u'Brick Briscoe', u'duration': 612, }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') js_player = self._download_webpage( 'http://eplayer.clipsyndicate.com/embed/player.js?va_id=%s' % video_id, video_id, u'Downloading player') # it includes a required token flvars = self._search_regex(r'flvars: "(.*?)"', js_player, u'flvars') pdoc = self._download_xml( 'http://eplayer.clipsyndicate.com/osmf/playlist?%s' % flvars, video_id, u'Downloading video info', transform_source=fix_xml_all_ampersand) track_doc = pdoc.find('trackList/track') def find_param(name): node = find_xpath_attr(track_doc, './/param', 'name', name) if node is not None: return node.attrib['value'] return { 'id': video_id, 'title': find_param('title'), 'url': track_doc.find('location').text, 'thumbnail': find_param('thumbnail'), 'duration': int(find_param('duration')), } import re from .common import InfoExtractor from ..utils import ( find_xpath_attr, fix_xml_ampersands ) class ClipsyndicateIE(InfoExtractor): _VALID_URL = r'http://www\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)' _TEST = { u'url': u'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe', u'md5': u'4d7d549451bad625e0ff3d7bd56d776c', u'info_dict': { u'id': u'4629301', u'ext': u'mp4', u'title': u'Brick Briscoe', u'duration': 612, }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') js_player = self._download_webpage( 'http://eplayer.clipsyndicate.com/embed/player.js?va_id=%s' % video_id, video_id, u'Downloading player') # it includes a required token flvars = self._search_regex(r'flvars: "(.*?)"', js_player, u'flvars') pdoc = self._download_xml( 'http://eplayer.clipsyndicate.com/osmf/playlist?%s' % flvars, video_id, u'Downloading video info', transform_source=fix_xml_ampersands) track_doc = pdoc.find('trackList/track') def find_param(name): node = find_xpath_attr(track_doc, './/param', 'name', name) if node is not None: return node.attrib['value'] return { 'id': video_id, 'title': find_param('title'), 'url': track_doc.find('location').text, 'thumbnail': find_param('thumbnail'), 'duration': int(find_param('duration')), }
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-42-fixed/youtube-dl/youtube_dl/extractor/clipsyndicate.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-42-buggy/youtube-dl/youtube_dl/extractor/clipsyndicate.py
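The two clipsyndicate.py copies above differ only in the name of the transform_source helper: one imports fix_xml_all_ampersand, the other the renamed fix_xml_ampersands from youtube_dl/utils.py. The helper exists because the playlist endpoint returns XML containing bare '&' characters, which xml.etree refuses to parse. Below is a minimal standalone sketch of such a transform; the exact entity lookahead in youtube_dl.utils may differ:

import re
import xml.etree.ElementTree as ET


def fix_xml_ampersands(xml_str):
    """Escape '&' characters that are not already part of an entity,
    so ElementTree can parse the (technically invalid) XML."""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;', xml_str)


broken = '<playlist><track><location>http://host/path?a=1&b=2</location></track></playlist>'
doc = ET.fromstring(fix_xml_ampersands(broken))
print(doc.find('track/location').text)  # -> http://host/path?a=1&b=2

Passed as transform_source, such a helper rewrites the response body before _download_xml hands it to the XML parser, so documents with unescaped ampersands no longer abort extraction.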
youtube-dl-bug-14
# coding: utf-8 from __future__ import unicode_literals import itertools import json import os.path import random import re import time import traceback from .common import InfoExtractor, SearchInfoExtractor from ..jsinterp import JSInterpreter from ..swfinterp import SWFInterpreter from ..compat import ( compat_chr, compat_HTTPError, compat_kwargs, compat_parse_qs, compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urlparse, compat_str, ) from ..utils import ( bool_or_none, clean_html, error_to_compat_str, extract_attributes, ExtractorError, float_or_none, get_element_by_attribute, get_element_by_id, int_or_none, mimetype2ext, orderedSet, parse_codecs, parse_duration, remove_quotes, remove_start, smuggle_url, str_or_none, str_to_int, try_get, unescapeHTML, unified_strdate, unsmuggle_url, uppercase_escape, url_or_none, urlencode_postdata, ) class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge' _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup' _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge' _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}' _NETRC_MACHINE = 'youtube' # If True it will raise an error if no login info is provided _LOGIN_REQUIRED = False _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}' def _set_language(self): self._set_cookie( '.youtube.com', 'PREF', 'f1=50000000&hl=en', # YouTube sets the expire time to about two months expire_time=time.time() + 2 * 30 * 24 * 3600) def _ids_to_results(self, ids): return [ self.url_result(vid_id, 'Youtube', video_id=vid_id) for vid_id in ids] def _login(self): """ Attempt to log in to YouTube. True is returned if successful or skipped. False is returned if login failed. If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised. """ username, password = self._get_login_info() # No authentication to be performed if username is None: if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None: raise ExtractorError('No login info available, needed for using %s.' 
% self.IE_NAME, expected=True) return True login_page = self._download_webpage( self._LOGIN_URL, None, note='Downloading login page', errnote='unable to fetch login page', fatal=False) if login_page is False: return login_form = self._hidden_inputs(login_page) def req(url, f_req, note, errnote): data = login_form.copy() data.update({ 'pstMsg': 1, 'checkConnection': 'youtube', 'checkedDomains': 'youtube', 'hl': 'en', 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]', 'f.req': json.dumps(f_req), 'flowName': 'GlifWebSignIn', 'flowEntry': 'ServiceLogin', # TODO: reverse actual botguard identifier generation algo 'bgRequest': '["identifier",""]', }) return self._download_json( url, None, note=note, errnote=errnote, transform_source=lambda s: re.sub(r'^[^[]*', '', s), fatal=False, data=urlencode_postdata(data), headers={ 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8', 'Google-Accounts-XSRF': 1, }) def warn(message): self._downloader.report_warning(message) lookup_req = [ username, None, [], None, 'US', None, None, 2, False, True, [ None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4], 1, [None, None, []], None, None, None, True ], username, ] lookup_results = req( self._LOOKUP_URL, lookup_req, 'Looking up account info', 'Unable to look up account info') if lookup_results is False: return False user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str) if not user_hash: warn('Unable to extract user hash') return False challenge_req = [ user_hash, None, 1, None, [1, None, None, None, [password, None, True]], [ None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4], 1, [None, None, []], None, None, None, True ]] challenge_results = req( self._CHALLENGE_URL, challenge_req, 'Logging in', 'Unable to log in') if challenge_results is False: return login_res = try_get(challenge_results, lambda x: x[0][5], list) if login_res: login_msg = try_get(login_res, lambda x: x[5], compat_str) warn( 'Unable to login: %s' % 'Invalid password' if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg) return False res = try_get(challenge_results, lambda x: x[0][-1], list) if not res: warn('Unable to extract result entry') return False login_challenge = try_get(res, lambda x: x[0][0], list) if login_challenge: challenge_str = try_get(login_challenge, lambda x: x[2], compat_str) if challenge_str == 'TWO_STEP_VERIFICATION': # SEND_SUCCESS - TFA code has been successfully sent to phone # QUOTA_EXCEEDED - reached the limit of TFA codes status = try_get(login_challenge, lambda x: x[5], compat_str) if status == 'QUOTA_EXCEEDED': warn('Exceeded the limit of TFA codes, try later') return False tl = try_get(challenge_results, lambda x: x[1][2], compat_str) if not tl: warn('Unable to extract TL') return False tfa_code = self._get_tfa_info('2-step verification code') if not tfa_code: warn( 'Two-factor authentication required. 
Provide it either interactively or with --twofactor <code>' '(Note that only TOTP (Google Authenticator App) codes work at this time.)') return False tfa_code = remove_start(tfa_code, 'G-') tfa_req = [ user_hash, None, 2, None, [ 9, None, None, None, None, None, None, None, [None, tfa_code, True, 2] ]] tfa_results = req( self._TFA_URL.format(tl), tfa_req, 'Submitting TFA code', 'Unable to submit TFA code') if tfa_results is False: return False tfa_res = try_get(tfa_results, lambda x: x[0][5], list) if tfa_res: tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str) warn( 'Unable to finish TFA: %s' % 'Invalid TFA code' if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg) return False check_cookie_url = try_get( tfa_results, lambda x: x[0][-1][2], compat_str) else: CHALLENGES = { 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.", 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.', 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.", } challenge = CHALLENGES.get( challenge_str, '%s returned error %s.' % (self.IE_NAME, challenge_str)) warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge) return False else: check_cookie_url = try_get(res, lambda x: x[2], compat_str) if not check_cookie_url: warn('Unable to extract CheckCookie URL') return False check_cookie_results = self._download_webpage( check_cookie_url, None, 'Checking cookie', fatal=False) if check_cookie_results is False: return False if 'https://myaccount.google.com/' not in check_cookie_results: warn('Unable to log in') return False return True def _download_webpage_handle(self, *args, **kwargs): query = kwargs.get('query', {}).copy() query['disable_polymer'] = 'true' kwargs['query'] = query return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle( *args, **compat_kwargs(kwargs)) def _real_initialize(self): if self._downloader is None: return self._set_language() if not self._login(): return class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor): # Extract entries from page with "Load more" button def _entries(self, page, playlist_id): more_widget_html = content_html = page for page_num in itertools.count(1): for entry in self._process_page(content_html): yield entry mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) if not mobj: break count = 0 retries = 3 while count <= retries: try: # Downloading page may result in intermittent 5xx HTTP error # that is usually worked around with a retry more = self._download_json( 'https://youtube.com/%s' % mobj.group('more'), playlist_id, 'Downloading page #%s%s' % (page_num, ' (retry #%d)' % count if count else ''), transform_source=uppercase_escape) break except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503): count += 1 if count <= retries: continue raise content_html = more['content_html'] if not content_html.strip(): # Some webpages show a "Load more" button but they don't # have more videos break more_widget_html = more['load_more_widget_html'] class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor): def _process_page(self, content): for video_id, video_title in self.extract_videos_from_page(content): yield self.url_result(video_id, 'Youtube', video_id, video_title) def extract_videos_from_page_impl(self, video_re, page, ids_in_page, titles_in_page): for mobj in 
re.finditer(video_re, page): # The link with index 0 is not the first video of the playlist (not sure if still actual) if 'index' in mobj.groupdict() and mobj.group('id') == '0': continue video_id = mobj.group('id') video_title = unescapeHTML( mobj.group('title')) if 'title' in mobj.groupdict() else None if video_title: video_title = video_title.strip() if video_title == '► Play all': video_title = None try: idx = ids_in_page.index(video_id) if video_title and not titles_in_page[idx]: titles_in_page[idx] = video_title except ValueError: ids_in_page.append(video_id) titles_in_page.append(video_title) def extract_videos_from_page(self, page): ids_in_page = [] titles_in_page = [] self.extract_videos_from_page_impl( self._VIDEO_RE, page, ids_in_page, titles_in_page) return zip(ids_in_page, titles_in_page) class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor): def _process_page(self, content): for playlist_id in orderedSet(re.findall( r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"', content)): yield self.url_result( 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist') def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) title = self._og_search_title(webpage, fatal=False) return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title) class YoutubeIE(YoutubeBaseInfoExtractor): IE_DESC = 'YouTube.com' _VALID_URL = r"""(?x)^ ( (?:https?://|//) # http(s):// or protocol-independent URL (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/| (?:www\.)?deturl\.com/www\.youtube\.com/| (?:www\.)?pwnyoutube\.com/| (?:www\.)?hooktube\.com/| (?:www\.)?yourepeat\.com/| tube\.majestyc\.net/| # Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances (?:(?:www|dev)\.)?invidio\.us/| (?:(?:www|no)\.)?invidiou\.sh/| (?:(?:www|fi|de)\.)?invidious\.snopyta\.org/| (?:www\.)?invidious\.kabi\.tk/| (?:www\.)?invidious\.13ad\.de/| (?:www\.)?invidious\.mastodon\.host/| (?:www\.)?invidious\.nixnet\.xyz/| (?:www\.)?invidious\.drycat\.fr/| (?:www\.)?tube\.poal\.co/| (?:www\.)?vid\.wxzm\.sx/| (?:www\.)?yewtu\.be/| (?:www\.)?yt\.elukerio\.org/| (?:www\.)?yt\.lelux\.fi/| (?:www\.)?invidious\.ggc-project\.de/| (?:www\.)?yt\.maisputain\.ovh/| (?:www\.)?invidious\.13ad\.de/| (?:www\.)?invidious\.toot\.koeln/| (?:www\.)?invidious\.fdn\.fr/| (?:www\.)?watch\.nettohikari\.com/| (?:www\.)?kgg2m7yk5aybusll\.onion/| (?:www\.)?qklhadlycap4cnod\.onion/| (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/| (?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/| (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/| (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/| (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/| (?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/| youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/ |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! (?:.*?[&;])?? 
# any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY) v= ) )) |(?: youtu\.be| # just youtu.be/xxxx vid\.plus| # or vid.plus/xxxx zwearz\.com/watch| # or zwearz.com/watch/xxxx )/ |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId= ) )? # all until now is optional -> you can pass the naked ID ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID (?!.*?\blist= (?: %(playlist_id)s| # combined list/video URLs are handled by the playlist IE WL # WL are handled by the watch later IE ) ) (?(1).+)? # if we found the ID, everything can follow $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE} _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' _PLAYER_INFO_RE = ( r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$', r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$', ) _formats = { '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'}, '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'}, '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'}, '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'}, '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'}, '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'}, '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'}, '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'}, '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'}, '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'}, '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'}, '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'}, '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'}, '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, # 3D videos '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20}, '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20}, '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20}, '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20}, '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20}, '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20}, '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20}, # Apple HTTP Live Streaming '91': {'ext': 'mp4', 'height': 144, 
'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10}, '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10}, '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10}, '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10}, '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10}, '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10}, '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10}, '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10}, # DASH mp4 video '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'}, '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'}, '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'}, '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'}, '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'}, '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559) '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'}, '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'}, '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'}, '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60}, '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60}, '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'}, # Dash mp4 audio '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'}, '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'}, '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'}, '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'}, '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'}, '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'}, '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'}, # Dash webm '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '278': {'ext': 'webm', 
'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'}, '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'}, # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug) '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, # Dash webm audio '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128}, '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256}, # Dash webm audio with opus inside '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50}, '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70}, '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160}, # RTMP (unnamed) '_rtmp': {'protocol': 'rtmp'}, # av01 video only formats sometimes served with "unknown" codecs '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, } _SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt') _GEO_BYPASS = False IE_NAME = 'youtube' _TESTS = [ { 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9', 'info_dict': { 'id': 'BaW_jenozKc', 'ext': 'mp4', 'title': 'youtube-dl test video "\'/\\ä↭𝕐', 'uploader': 'Philipp Hagemeister', 'uploader_id': 'phihag', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag', 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q', 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q', 'upload_date': '20121002', 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .', 'categories': ['Science & Technology'], 'tags': ['youtube-dl'], 'duration': 10, 'view_count': int, 'like_count': int, 'dislike_count': int, 'start_time': 1, 'end_time': 9, } }, { 'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY', 'note': 'Test generic use_cipher_signature video (#897)', 'info_dict': { 'id': 'UxxajLWwzqY', 'ext': 'mp4', 'upload_date': '20120506', 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]', 'alt_title': 'I Love It (feat. 
Charli XCX)', 'description': 'md5:19a2f98d9032b9311e686ed039564f63', 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli', 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop', 'iconic ep', 'iconic', 'love', 'it'], 'duration': 180, 'uploader': 'Icona Pop', 'uploader_id': 'IconaPop', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop', 'creator': 'Icona Pop', 'track': 'I Love It (feat. Charli XCX)', 'artist': 'Icona Pop', } }, { 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ', 'note': 'Test VEVO video with age protection (#956)', 'info_dict': { 'id': '07FYdnEawAQ', 'ext': 'mp4', 'upload_date': '20130703', 'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)', 'alt_title': 'Tunnel Vision', 'description': 'md5:07dab3356cde4199048e4c7cd93471e1', 'duration': 419, 'uploader': 'justintimberlakeVEVO', 'uploader_id': 'justintimberlakeVEVO', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO', 'creator': 'Justin Timberlake', 'track': 'Tunnel Vision', 'artist': 'Justin Timberlake', 'age_limit': 18, } }, { 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ', 'note': 'Embed-only video (#1746)', 'info_dict': { 'id': 'yZIXLfi8CZQ', 'ext': 'mp4', 'upload_date': '20120608', 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012', 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7', 'uploader': 'SET India', 'uploader_id': 'setindia', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia', 'age_limit': 18, } }, { 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY', 'note': 'Use the first video ID in the URL', 'info_dict': { 'id': 'BaW_jenozKc', 'ext': 'mp4', 'title': 'youtube-dl test video "\'/\\ä↭𝕐', 'uploader': 'Philipp Hagemeister', 'uploader_id': 'phihag', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag', 'upload_date': '20121002', 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .', 'categories': ['Science & Technology'], 'tags': ['youtube-dl'], 'duration': 10, 'view_count': int, 'like_count': int, 'dislike_count': int, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I', 'note': '256k DASH audio (format 141) via DASH manifest', 'info_dict': { 'id': 'a9LDPn-MO4I', 'ext': 'm4a', 'upload_date': '20121002', 'uploader_id': '8KVIDEO', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO', 'description': '', 'uploader': '8KVIDEO', 'title': 'UHDTV TEST 8K VIDEO.mp4' }, 'params': { 'youtube_include_dash_manifest': True, 'format': '141', }, 'skip': 'format 141 not served anymore', }, # DASH manifest with encrypted signature { 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA', 'info_dict': { 'id': 'IB3lcPjvWLA', 'ext': 'm4a', 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. 
Spree Wilson', 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf', 'duration': 244, 'uploader': 'AfrojackVEVO', 'uploader_id': 'AfrojackVEVO', 'upload_date': '20131011', }, 'params': { 'youtube_include_dash_manifest': True, 'format': '141/bestaudio[ext=m4a]', }, }, # JS player signature function name containing $ { 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM', 'info_dict': { 'id': 'nfWlot6h_JM', 'ext': 'm4a', 'title': 'Taylor Swift - Shake It Off', 'description': 'md5:307195cd21ff7fa352270fe884570ef0', 'duration': 242, 'uploader': 'TaylorSwiftVEVO', 'uploader_id': 'TaylorSwiftVEVO', 'upload_date': '20140818', }, 'params': { 'youtube_include_dash_manifest': True, 'format': '141/bestaudio[ext=m4a]', }, }, # Controversy video { 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8', 'info_dict': { 'id': 'T4XJQO3qol8', 'ext': 'mp4', 'duration': 219, 'upload_date': '20100909', 'uploader': 'Amazing Atheist', 'uploader_id': 'TheAmazingAtheist', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist', 'title': 'Burning Everyone\'s Koran', 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html', } }, # Normal age-gate video (No vevo, embed allowed) { 'url': 'https://youtube.com/watch?v=HtVdAasjOgU', 'info_dict': { 'id': 'HtVdAasjOgU', 'ext': 'mp4', 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer', 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}', 'duration': 142, 'uploader': 'The Witcher', 'uploader_id': 'WitcherGame', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame', 'upload_date': '20140605', 'age_limit': 18, }, }, # Age-gate video with encrypted signature { 'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU', 'info_dict': { 'id': '6kLq3WMV1nU', 'ext': 'mp4', 'title': 'Dedication To My Ex (Miss That) (Lyric Video)', 'description': 'md5:33765bb339e1b47e7e72b5490139bb41', 'duration': 246, 'uploader': 'LloydVEVO', 'uploader_id': 'LloydVEVO', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO', 'upload_date': '20110629', 'age_limit': 18, }, }, # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421) # YouTube Red ad is not captured for creator { 'url': '__2ABJjxzNo', 'info_dict': { 'id': '__2ABJjxzNo', 'ext': 'mp4', 'duration': 266, 'upload_date': '20100430', 'uploader_id': 'deadmau5', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5', 'creator': 'Dada Life, deadmau5', 'description': 'md5:12c56784b8032162bb936a5f76d55360', 'uploader': 'deadmau5', 'title': 'Deadmau5 - Some Chords (HD)', 'alt_title': 'This Machine Kills Some Chords', }, 'expected_warnings': [ 'DASH manifest missing', ] }, # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431) { 'url': 'lqQg6PlCWgI', 'info_dict': { 'id': 'lqQg6PlCWgI', 'ext': 'mp4', 'duration': 6085, 'upload_date': '20150827', 'uploader_id': 'olympic', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic', 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games', 'uploader': 'Olympic', 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games', }, 'params': { 'skip_download': 'requires avconv', } }, # Non-square pixels { 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0', 'info_dict': { 'id': '_b-2C3KPAM0', 'ext': 'mp4', 'stretched_ratio': 16 / 9., 'duration': 85, 'upload_date': '20110310', 
'uploader_id': 'AllenMeow', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow', 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯', 'uploader': '孫ᄋᄅ', 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人', }, }, # url_encoded_fmt_stream_map is empty string { 'url': 'qEJwOuvDf7I', 'info_dict': { 'id': 'qEJwOuvDf7I', 'ext': 'webm', 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге', 'description': '', 'upload_date': '20150404', 'uploader_id': 'spbelect', 'uploader': 'Наблюдатели Петербурга', }, 'params': { 'skip_download': 'requires avconv', }, 'skip': 'This live event has ended.', }, # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097) { 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y', 'info_dict': { 'id': 'FIl7x6_3R5Y', 'ext': 'webm', 'title': 'md5:7b81415841e02ecd4313668cde88737a', 'description': 'md5:116377fd2963b81ec4ce64b542173306', 'duration': 220, 'upload_date': '20150625', 'uploader_id': 'dorappi2000', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000', 'uploader': 'dorappi2000', 'formats': 'mincount:31', }, 'skip': 'not actual anymore', }, # DASH manifest with segment_list { 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8', 'md5': '8ce563a1d667b599d21064e982ab9e31', 'info_dict': { 'id': 'CsmdDsKjzN8', 'ext': 'mp4', 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510 'uploader': 'Airtek', 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.', 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ', 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015', }, 'params': { 'youtube_include_dash_manifest': True, 'format': '135', # bestvideo }, 'skip': 'This live event has ended.', }, { # Multifeed videos (multiple cameras), URL is for Main Camera 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs', 'info_dict': { 'id': 'jqWvoWXjCVs', 'title': 'teamPGP: Rocket League Noob Stream', 'description': 'md5:dc7872fb300e143831327f1bae3af010', }, 'playlist': [{ 'info_dict': { 'id': 'jqWvoWXjCVs', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7335, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }, { 'info_dict': { 'id': '6h8e8xoXJzg', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7337, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }, { 'info_dict': { 'id': 'PUOgX5z9xZw', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (grizzle)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7337, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }, { 'info_dict': { 'id': 'teuwxikvS5k', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (zim)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7334, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 
'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }], 'params': { 'skip_download': True, }, 'skip': 'This video is not available.', }, { # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536) 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo', 'info_dict': { 'id': 'gVfLd0zydlo', 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30', }, 'playlist_count': 2, 'skip': 'Not multifeed anymore', }, { 'url': 'https://vid.plus/FlRa-iH7PGw', 'only_matching': True, }, { 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html', 'only_matching': True, }, { # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468) # Also tests cut-off URL expansion in video description (see # https://github.com/ytdl-org/youtube-dl/issues/1892, # https://github.com/ytdl-org/youtube-dl/issues/8164) 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg', 'info_dict': { 'id': 'lsguqyKfVQg', 'ext': 'mp4', 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21', 'alt_title': 'Dark Walk - Position Music', 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a', 'duration': 133, 'upload_date': '20151119', 'uploader_id': 'IronSoulElf', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf', 'uploader': 'IronSoulElf', 'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan', 'track': 'Dark Walk - Position Music', 'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan', 'album': 'Position Music - Production Music Vol. 143 - Dark Walk', }, 'params': { 'skip_download': True, }, }, { # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468) 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8', 'only_matching': True, }, { # Video with yt:stretch=17:0 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM', 'info_dict': { 'id': 'Q39EVAstoRM', 'ext': 'mp4', 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4', 'description': 'md5:ee18a25c350637c8faff806845bddee9', 'upload_date': '20151107', 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA', 'uploader': 'CH GAMER DROID', }, 'params': { 'skip_download': True, }, 'skip': 'This video does not exist.', }, { # Video licensed under Creative Commons 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA', 'info_dict': { 'id': 'M4gD1WSo5mA', 'ext': 'mp4', 'title': 'md5:e41008789470fc2533a3252216f1c1d1', 'description': 'md5:a677553cf0840649b731a3024aeff4cc', 'duration': 721, 'upload_date': '20150127', 'uploader_id': 'BerkmanCenter', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter', 'uploader': 'The Berkman Klein Center for Internet & Society', 'license': 'Creative Commons Attribution license (reuse allowed)', }, 'params': { 'skip_download': True, }, }, { # Channel-like uploader_url 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg', 'info_dict': { 'id': 'eQcmzGIKrzg', 'ext': 'mp4', 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders', 'description': 'md5:dda0d780d5a6e120758d1711d062a867', 'duration': 4060, 'upload_date': '20151119', 'uploader': 'Bernie Sanders', 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg', 'license': 'Creative Commons Attribution license (reuse allowed)', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY', 'only_matching': True, }, { # 
YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059) 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo', 'only_matching': True, }, { # Rental video preview 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg', 'info_dict': { 'id': 'uGpuVWrhIzE', 'ext': 'mp4', 'title': 'Piku - Trailer', 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb', 'upload_date': '20150811', 'uploader': 'FlixMatrix', 'uploader_id': 'FlixMatrixKaravan', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan', 'license': 'Standard YouTube License', }, 'params': { 'skip_download': True, }, 'skip': 'This video is not available.', }, { # YouTube Red video with episode data 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4', 'info_dict': { 'id': 'iqKdEhx-dD4', 'ext': 'mp4', 'title': 'Isolation - Mind Field (Ep 1)', 'description': 'md5:46a29be4ceffa65b92d277b93f463c0f', 'duration': 2085, 'upload_date': '20170118', 'uploader': 'Vsauce', 'uploader_id': 'Vsauce', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce', 'series': 'Mind Field', 'season_number': 1, 'episode_number': 1, }, 'params': { 'skip_download': True, }, 'expected_warnings': [ 'Skipping DASH manifest', ], }, { # The following content has been identified by the YouTube community # as inappropriate or offensive to some audiences. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI', 'info_dict': { 'id': '6SJNVb0GnPI', 'ext': 'mp4', 'title': 'Race Differences in Intelligence', 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1', 'duration': 965, 'upload_date': '20140124', 'uploader': 'New Century Foundation', 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg', }, 'params': { 'skip_download': True, }, }, { # itag 212 'url': '1t24XAntNCY', 'only_matching': True, }, { # geo restricted to JP 'url': 'sJL6WA-aGkQ', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM', 'only_matching': True, }, { 'url': 'https://invidio.us/watch?v=BaW_jenozKc', 'only_matching': True, }, { # DRM protected 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc', 'only_matching': True, }, { # Video with unsupported adaptive stream type formats 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U', 'info_dict': { 'id': 'Z4Vy8R84T1U', 'ext': 'mp4', 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'duration': 433, 'upload_date': '20130923', 'uploader': 'Amelia Putri Harwita', 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q', 'formats': 'maxcount:10', }, 'params': { 'skip_download': True, 'youtube_include_dash_manifest': False, }, 'skip': 'not actual anymore', }, { # Youtube Music Auto-generated description 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs', 'info_dict': { 'id': 'MgNrAu2pzNs', 'ext': 'mp4', 'title': 'Voyeur Girl', 'description': 'md5:7ae382a65843d6df2685993e90a8628f', 'upload_date': '20190312', 'uploader': 'Stephen - Topic', 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA', 'artist': 'Stephen', 'track': 'Voyeur Girl', 'album': 'it\'s too much love to know my dear', 'release_date': '20190313', 'release_year': 2019, }, 'params': { 'skip_download': True, }, }, { # Youtube Music Auto-generated description # Retrieve 'artist' field from 'Artist:' in video description # when it is present on youtube music video 'url': 
'https://www.youtube.com/watch?v=k0jLE7tTwjY', 'info_dict': { 'id': 'k0jLE7tTwjY', 'ext': 'mp4', 'title': 'Latch Feat. Sam Smith', 'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335', 'upload_date': '20150110', 'uploader': 'Various Artists - Topic', 'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w', 'artist': 'Disclosure', 'track': 'Latch Feat. Sam Smith', 'album': 'Latch Featuring Sam Smith', 'release_date': '20121008', 'release_year': 2012, }, 'params': { 'skip_download': True, }, }, { # Youtube Music Auto-generated description # handle multiple artists on youtube music video 'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA', 'info_dict': { 'id': '74qn0eJSjpA', 'ext': 'mp4', 'title': 'Eastside', 'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2', 'upload_date': '20180710', 'uploader': 'Benny Blanco - Topic', 'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A', 'artist': 'benny blanco, Halsey, Khalid', 'track': 'Eastside', 'album': 'Eastside', 'release_date': '20180713', 'release_year': 2018, }, 'params': { 'skip_download': True, }, }, { # Youtube Music Auto-generated description # handle youtube music video with release_year and no release_date 'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M', 'info_dict': { 'id': '-hcAI0g-f5M', 'ext': 'mp4', 'title': 'Put It On Me', 'description': 'md5:f6422397c07c4c907c6638e1fee380a5', 'upload_date': '20180426', 'uploader': 'Matt Maeson - Topic', 'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ', 'artist': 'Matt Maeson', 'track': 'Put It On Me', 'album': 'The Hearse', 'release_date': None, 'release_year': 2018, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q', 'only_matching': True, }, { # invalid -> valid video id redirection 'url': 'DJztXj2GPfl', 'info_dict': { 'id': 'DJztXj2GPfk', 'ext': 'mp4', 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)', 'description': 'md5:bf577a41da97918e94fa9798d9228825', 'upload_date': '20090125', 'uploader': 'Prochorowka', 'uploader_id': 'Prochorowka', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka', 'artist': 'Panjabi MC', 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix', 'album': 'Beware of the Boys (Mundian To Bach Ke)', }, 'params': { 'skip_download': True, }, } ] def __init__(self, *args, **kwargs): super(YoutubeIE, self).__init__(*args, **kwargs) self._player_cache = {} def report_video_info_webpage_download(self, video_id): """Report attempt to download video info webpage.""" self.to_screen('%s: Downloading video info webpage' % video_id) def report_information_extraction(self, video_id): """Report attempt to extract video information.""" self.to_screen('%s: Extracting video information' % video_id) def report_unavailable_format(self, video_id, format): """Report extracted video URL.""" self.to_screen('%s: Format %s not available' % (video_id, format)) def report_rtmp_download(self): """Indicate the download will use the RTMP protocol.""" self.to_screen('RTMP download detected') def _signature_cache_id(self, example_sig): """ Return a string representation of a signature """ return '.'.join(compat_str(len(part)) for part in example_sig.split('.')) @classmethod def _extract_player_info(cls, player_url): for player_re in cls._PLAYER_INFO_RE: id_m = re.search(player_re, player_url) if id_m: break else: raise ExtractorError('Cannot identify player %r' % player_url) return id_m.group('ext'), id_m.group('id') def _extract_signature_function(self, video_id, player_url, example_sig): player_type, player_id = 
self._extract_player_info(player_url) # Read from filesystem cache func_id = '%s_%s_%s' % ( player_type, player_id, self._signature_cache_id(example_sig)) assert os.path.basename(func_id) == func_id cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id) if cache_spec is not None: return lambda s: ''.join(s[i] for i in cache_spec) download_note = ( 'Downloading player %s' % player_url if self._downloader.params.get('verbose') else 'Downloading %s player %s' % (player_type, player_id) ) if player_type == 'js': code = self._download_webpage( player_url, video_id, note=download_note, errnote='Download of %s failed' % player_url) res = self._parse_sig_js(code) elif player_type == 'swf': urlh = self._request_webpage( player_url, video_id, note=download_note, errnote='Download of %s failed' % player_url) code = urlh.read() res = self._parse_sig_swf(code) else: assert False, 'Invalid player type %r' % player_type test_string = ''.join(map(compat_chr, range(len(example_sig)))) cache_res = res(test_string) cache_spec = [ord(c) for c in cache_res] self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec) return res def _print_sig_code(self, func, example_sig): def gen_sig_code(idxs): def _genslice(start, end, step): starts = '' if start == 0 else str(start) ends = (':%d' % (end + step)) if end + step >= 0 else ':' steps = '' if step == 1 else (':%d' % step) return 's[%s%s%s]' % (starts, ends, steps) step = None # Quelch pyflakes warnings - start will be set when step is set start = '(Never used)' for i, prev in zip(idxs[1:], idxs[:-1]): if step is not None: if i - prev == step: continue yield _genslice(start, prev, step) step = None continue if i - prev in [-1, 1]: step = i - prev start = prev continue else: yield 's[%d]' % prev if step is None: yield 's[%d]' % i else: yield _genslice(start, i, step) test_string = ''.join(map(compat_chr, range(len(example_sig)))) cache_res = func(test_string) cache_spec = [ord(c) for c in cache_res] expr_code = ' + '.join(gen_sig_code(cache_spec)) signature_id_tuple = '(%s)' % ( ', '.join(compat_str(len(p)) for p in example_sig.split('.'))) code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n' ' return %s\n') % (signature_id_tuple, expr_code) self.to_screen('Extracted signature function:\n' + code) def _parse_sig_js(self, jscode): funcname = self._search_regex( (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # Obsolete patterns r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(', r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('), jscode, 'Initial JS player signature function name', group='sig') jsi = JSInterpreter(jscode) initial_function = jsi.extract_function(funcname) return lambda s: 
initial_function([s]) def _parse_sig_swf(self, file_contents): swfi = SWFInterpreter(file_contents) TARGET_CLASSNAME = 'SignatureDecipher' searched_class = swfi.extract_class(TARGET_CLASSNAME) initial_function = swfi.extract_function(searched_class, 'decipher') return lambda s: initial_function([s]) def _decrypt_signature(self, s, video_id, player_url, age_gate=False): """Turn the encrypted s field into a working signature""" if player_url is None: raise ExtractorError('Cannot decrypt signature without player_url') if player_url.startswith('//'): player_url = 'https:' + player_url elif not re.match(r'https?://', player_url): player_url = compat_urlparse.urljoin( 'https://www.youtube.com', player_url) try: player_id = (player_url, self._signature_cache_id(s)) if player_id not in self._player_cache: func = self._extract_signature_function( video_id, player_url, s ) self._player_cache[player_id] = func func = self._player_cache[player_id] if self._downloader.params.get('youtube_print_sig_code'): self._print_sig_code(func, s) return func(s) except Exception as e: tb = traceback.format_exc() raise ExtractorError( 'Signature extraction failed: ' + tb, cause=e) def _get_subtitles(self, video_id, webpage): try: subs_doc = self._download_xml( 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id, video_id, note=False) except ExtractorError as err: self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err)) return {} sub_lang_list = {} for track in subs_doc.findall('track'): lang = track.attrib['lang_code'] if lang in sub_lang_list: continue sub_formats = [] for ext in self._SUBTITLE_FORMATS: params = compat_urllib_parse_urlencode({ 'lang': lang, 'v': video_id, 'fmt': ext, 'name': track.attrib['name'].encode('utf-8'), }) sub_formats.append({ 'url': 'https://www.youtube.com/api/timedtext?' + params, 'ext': ext, }) sub_lang_list[lang] = sub_formats if not sub_lang_list: self._downloader.report_warning('video doesn\'t have subtitles') return {} return sub_lang_list def _get_ytplayer_config(self, video_id, webpage): patterns = ( # User data may contain arbitrary character sequences that may affect # JSON extraction with regex, e.g. when '};' is contained the second # regex won't capture the whole JSON. 
Yet working around by trying more # concrete regex first keeping in mind proper quoted string handling # to be implemented in future that will replace this workaround (see # https://github.com/ytdl-org/youtube-dl/issues/7468, # https://github.com/ytdl-org/youtube-dl/pull/7599) r';ytplayer\.config\s*=\s*({.+?});ytplayer', r';ytplayer\.config\s*=\s*({.+?});', ) config = self._search_regex( patterns, webpage, 'ytplayer.config', default=None) if config: return self._parse_json( uppercase_escape(config), video_id, fatal=False) def _get_automatic_captions(self, video_id, webpage): """We need the webpage for getting the captions url, pass it as an argument to speed up the process.""" self.to_screen('%s: Looking for automatic captions' % video_id) player_config = self._get_ytplayer_config(video_id, webpage) err_msg = 'Couldn\'t find automatic captions for %s' % video_id if not player_config: self._downloader.report_warning(err_msg) return {} try: args = player_config['args'] caption_url = args.get('ttsurl') if caption_url: timestamp = args['timestamp'] # We get the available subtitles list_params = compat_urllib_parse_urlencode({ 'type': 'list', 'tlangs': 1, 'asrs': 1, }) list_url = caption_url + '&' + list_params caption_list = self._download_xml(list_url, video_id) original_lang_node = caption_list.find('track') if original_lang_node is None: self._downloader.report_warning('Video doesn\'t have automatic captions') return {} original_lang = original_lang_node.attrib['lang_code'] caption_kind = original_lang_node.attrib.get('kind', '') sub_lang_list = {} for lang_node in caption_list.findall('target'): sub_lang = lang_node.attrib['lang_code'] sub_formats = [] for ext in self._SUBTITLE_FORMATS: params = compat_urllib_parse_urlencode({ 'lang': original_lang, 'tlang': sub_lang, 'fmt': ext, 'ts': timestamp, 'kind': caption_kind, }) sub_formats.append({ 'url': caption_url + '&' + params, 'ext': ext, }) sub_lang_list[sub_lang] = sub_formats return sub_lang_list def make_captions(sub_url, sub_langs): parsed_sub_url = compat_urllib_parse_urlparse(sub_url) caption_qs = compat_parse_qs(parsed_sub_url.query) captions = {} for sub_lang in sub_langs: sub_formats = [] for ext in self._SUBTITLE_FORMATS: caption_qs.update({ 'tlang': [sub_lang], 'fmt': [ext], }) sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace( query=compat_urllib_parse_urlencode(caption_qs, True))) sub_formats.append({ 'url': sub_url, 'ext': ext, }) captions[sub_lang] = sub_formats return captions # New captions format as of 22.06.2017 player_response = args.get('player_response') if player_response and isinstance(player_response, compat_str): player_response = self._parse_json( player_response, video_id, fatal=False) if player_response: renderer = player_response['captions']['playerCaptionsTracklistRenderer'] base_url = renderer['captionTracks'][0]['baseUrl'] sub_lang_list = [] for lang in renderer['translationLanguages']: lang_code = lang.get('languageCode') if lang_code: sub_lang_list.append(lang_code) return make_captions(base_url, sub_lang_list) # Some videos don't provide ttsurl but rather caption_tracks and # caption_translation_languages (e.g. 
20LmZk1hakA) # Does not used anymore as of 22.06.2017 caption_tracks = args['caption_tracks'] caption_translation_languages = args['caption_translation_languages'] caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0] sub_lang_list = [] for lang in caption_translation_languages.split(','): lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang)) sub_lang = lang_qs.get('lc', [None])[0] if sub_lang: sub_lang_list.append(sub_lang) return make_captions(caption_url, sub_lang_list) # An extractor error can be raise by the download process if there are # no automatic captions but there are subtitles except (KeyError, IndexError, ExtractorError): self._downloader.report_warning(err_msg) return {} def _mark_watched(self, video_id, video_info, player_response): playback_url = url_or_none(try_get( player_response, lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get( video_info, lambda x: x['videostats_playback_base_url'][0])) if not playback_url: return parsed_playback_url = compat_urlparse.urlparse(playback_url) qs = compat_urlparse.parse_qs(parsed_playback_url.query) # cpn generation algorithm is reverse engineered from base.js. # In fact it works even with dummy cpn. CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_' cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))) qs.update({ 'ver': ['2'], 'cpn': [cpn], }) playback_url = compat_urlparse.urlunparse( parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True))) self._download_webpage( playback_url, video_id, 'Marking watched', 'Unable to mark watched', fatal=False) @staticmethod def _extract_urls(webpage): # Embedded YouTube player entries = [ unescapeHTML(mobj.group('url')) for mobj in re.finditer(r'''(?x) (?: <iframe[^>]+?src=| data-video-url=| <embed[^>]+?src=| embedSWF\(?:\s*| <object[^>]+data=| new\s+SWFObject\( ) (["\']) (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/ (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?) 
\1''', webpage)] # lazyYT YouTube embed entries.extend(list(map( unescapeHTML, re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)))) # Wordpress "YouTube Video Importer" plugin matches = re.findall(r'''(?x)<div[^>]+ class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+ data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage) entries.extend(m[-1] for m in matches) return entries @staticmethod def _extract_url(webpage): urls = YoutubeIE._extract_urls(webpage) return urls[0] if urls else None @classmethod def extract_id(cls, url): mobj = re.match(cls._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) video_id = mobj.group(2) return video_id @staticmethod def _extract_chapters(description, duration): if not description: return None chapter_lines = re.findall( r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)', description) if not chapter_lines: return None chapters = [] for next_num, (chapter_line, time_point) in enumerate( chapter_lines, start=1): start_time = parse_duration(time_point) if start_time is None: continue if start_time > duration: break end_time = (duration if next_num == len(chapter_lines) else parse_duration(chapter_lines[next_num][1])) if end_time is None: continue if end_time > duration: end_time = duration if start_time > end_time: break chapter_title = re.sub( r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-') chapter_title = re.sub(r'\s+', ' ', chapter_title) chapters.append({ 'start_time': start_time, 'end_time': end_time, 'title': chapter_title, }) return chapters def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) proto = ( 'http' if self._downloader.params.get('prefer_insecure', False) else 'https') start_time = None end_time = None parsed_url = compat_urllib_parse_urlparse(url) for component in [parsed_url.fragment, parsed_url.query]: query = compat_parse_qs(component) if start_time is None and 't' in query: start_time = parse_duration(query['t'][0]) if start_time is None and 'start' in query: start_time = parse_duration(query['start'][0]) if end_time is None and 'end' in query: end_time = parse_duration(query['end'][0]) # Extract original video URL from URL with redirection, like age verification, using next_url parameter mobj = re.search(self._NEXT_URL_RE, url) if mobj: url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/') video_id = self.extract_id(url) # Get video webpage url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id video_webpage, urlh = self._download_webpage_handle(url, video_id) qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query) video_id = qs.get('v', [None])[0] or video_id # Attempt to extract SWF player URL mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) if mobj is not None: player_url = re.sub(r'\\(.)', r'\1', mobj.group(1)) else: player_url = None dash_mpds = [] def add_dash_mpd(video_info): dash_mpd = video_info.get('dashmpd') if dash_mpd and dash_mpd[0] not in dash_mpds: dash_mpds.append(dash_mpd[0]) def add_dash_mpd_pr(pl_response): dash_mpd = url_or_none(try_get( pl_response, lambda x: x['streamingData']['dashManifestUrl'], compat_str)) if dash_mpd and dash_mpd not in dash_mpds: dash_mpds.append(dash_mpd) is_live = None view_count = None def extract_view_count(v_info): return int_or_none(try_get(v_info, lambda x: 
                x['view_count'][0]))

        def extract_player_response(player_response, video_id):
            pl_response = str_or_none(player_response)
            if not pl_response:
                return
            pl_response = self._parse_json(pl_response, video_id, fatal=False)
            if isinstance(pl_response, dict):
                add_dash_mpd_pr(pl_response)
                return pl_response

        player_response = {}

        # Get video info
        video_info = {}
        embed_webpage = None
        if re.search(r'player-age-gate-content">', video_webpage) is not None:
            age_gate = True
            # We simulate access to the video from www.youtube.com/v/{video_id};
            # this way it can be viewed without logging in to YouTube
            url = proto + '://www.youtube.com/embed/%s' % video_id
            embed_webpage = self._download_webpage(
                url, video_id, 'Downloading embed webpage')
            data = compat_urllib_parse_urlencode({
                'video_id': video_id,
                'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                'sts': self._search_regex(
                    r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
            })
            video_info_url = proto + '://www.youtube.com/get_video_info?' + data
            try:
                video_info_webpage = self._download_webpage(
                    video_info_url, video_id,
                    note='Refetching age-gated info webpage',
                    errnote='unable to download video info webpage')
            except ExtractorError:
                video_info_webpage = None
            if video_info_webpage:
                video_info = compat_parse_qs(video_info_webpage)
                pl_response = video_info.get('player_response', [None])[0]
                player_response = extract_player_response(pl_response, video_id)
                add_dash_mpd(video_info)
                view_count = extract_view_count(video_info)
        else:
            age_gate = False
            # Try looking directly into the video webpage
            ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
            if ytplayer_config:
                args = ytplayer_config['args']
                if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
                    # Convert to the same format returned by compat_parse_qs
                    video_info = dict((k, [v]) for k, v in args.items())
                    add_dash_mpd(video_info)
                # Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg, # https://github.com/ytdl-org/youtube-dl/issues/10532) if not video_info and args.get('ypc_vid'): return self.url_result( args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid']) if args.get('livestream') == '1' or args.get('live_playback') == 1: is_live = True if not player_response: player_response = extract_player_response(args.get('player_response'), video_id) if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True): add_dash_mpd_pr(player_response) def extract_unavailable_message(): messages = [] for tag, kind in (('h1', 'message'), ('div', 'submessage')): msg = self._html_search_regex( r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind), video_webpage, 'unavailable %s' % kind, default=None) if msg: messages.append(msg) if messages: return '\n'.join(messages) if not video_info and not player_response: unavailable_message = extract_unavailable_message() if not unavailable_message: unavailable_message = 'Unable to extract video data' raise ExtractorError( 'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id) if not isinstance(video_info, dict): video_info = {} video_details = try_get( player_response, lambda x: x['videoDetails'], dict) or {} video_title = video_info.get('title', [None])[0] or video_details.get('title') if not video_title: self._downloader.report_warning('Unable to extract video title') video_title = '_' description_original = video_description = get_element_by_id("eow-description", video_webpage) if video_description: def replace_url(m): redir_url = compat_urlparse.urljoin(url, m.group(1)) parsed_redir_url = compat_urllib_parse_urlparse(redir_url) if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect': qs = compat_parse_qs(parsed_redir_url.query) q = qs.get('q') if q and q[0]: return q[0] return redir_url description_original = video_description = re.sub(r'''(?x) <a\s+ (?:[a-zA-Z-]+="[^"]*"\s+)*? (?:title|href)="([^"]+)"\s+ (?:[a-zA-Z-]+="[^"]*"\s+)*? 
class="[^"]*"[^>]*> [^<]+\.{3}\s* </a> ''', replace_url, video_description) video_description = clean_html(video_description) else: video_description = self._html_search_meta('description', video_webpage) or video_details.get('shortDescription') if not smuggled_data.get('force_singlefeed', False): if not self._downloader.params.get('noplaylist'): multifeed_metadata_list = try_get( player_response, lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'], compat_str) or try_get( video_info, lambda x: x['multifeed_metadata_list'][0], compat_str) if multifeed_metadata_list: entries = [] feed_ids = [] for feed in multifeed_metadata_list.split(','): # Unquote should take place before split on comma (,) since textual # fields may contain comma as well (see # https://github.com/ytdl-org/youtube-dl/issues/8536) feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed)) def feed_entry(name): return try_get(feed_data, lambda x: x[name][0], compat_str) feed_id = feed_entry('id') if not feed_id: continue feed_title = feed_entry('title') title = video_title if feed_title: title += ' (%s)' % feed_title entries.append({ '_type': 'url_transparent', 'ie_key': 'Youtube', 'url': smuggle_url( '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]), {'force_singlefeed': True}), 'title': title, }) feed_ids.append(feed_id) self.to_screen( 'Downloading multifeed video (%s) - add --no-playlist to just download video %s' % (', '.join(feed_ids), video_id)) return self.playlist_result(entries, video_id, video_title, video_description) else: self.to_screen('Downloading just video %s because of --no-playlist' % video_id) if view_count is None: view_count = extract_view_count(video_info) if view_count is None and video_details: view_count = int_or_none(video_details.get('viewCount')) if is_live is None: is_live = bool_or_none(video_details.get('isLive')) # Check for "rental" videos if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: raise ExtractorError('"rental" videos not supported. 
See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True) def _extract_filesize(media_url): return int_or_none(self._search_regex( r'\bclen[=/](\d+)', media_url, 'filesize', default=None)) streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or [] streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or []) if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'): self.report_rtmp_download() formats = [{ 'format_id': '_rtmp', 'protocol': 'rtmp', 'url': video_info['conn'][0], 'player_url': player_url, }] elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1): encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0] if 'rtmpe%3Dyes' in encoded_url_map: raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True) formats = [] formats_spec = {} fmt_list = video_info.get('fmt_list', [''])[0] if fmt_list: for fmt in fmt_list.split(','): spec = fmt.split('/') if len(spec) > 1: width_height = spec[1].split('x') if len(width_height) == 2: formats_spec[spec[0]] = { 'resolution': spec[1], 'width': int_or_none(width_height[0]), 'height': int_or_none(width_height[1]), } for fmt in streaming_formats: itag = str_or_none(fmt.get('itag')) if not itag: continue quality = fmt.get('quality') quality_label = fmt.get('qualityLabel') or quality formats_spec[itag] = { 'asr': int_or_none(fmt.get('audioSampleRate')), 'filesize': int_or_none(fmt.get('contentLength')), 'format_note': quality_label, 'fps': int_or_none(fmt.get('fps')), 'height': int_or_none(fmt.get('height')), # bitrate for itag 43 is always 2147483647 'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None, 'width': int_or_none(fmt.get('width')), } for fmt in streaming_formats: if fmt.get('drmFamilies') or fmt.get('drm_families'): continue url = url_or_none(fmt.get('url')) if not url: cipher = fmt.get('cipher') or fmt.get('signatureCipher') if not cipher: continue url_data = compat_parse_qs(cipher) url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str)) if not url: continue else: cipher = None url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query) stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0])) # Unsupported FORMAT_STREAM_TYPE_OTF if stream_type == 3: continue format_id = fmt.get('itag') or url_data['itag'][0] if not format_id: continue format_id = compat_str(format_id) if cipher: if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True): ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")' jsplayer_url_json = self._search_regex( ASSETS_RE, embed_webpage if age_gate else video_webpage, 'JS player URL (1)', default=None) if not jsplayer_url_json and not age_gate: # We need the embed website after all if embed_webpage is None: embed_url = proto + '://www.youtube.com/embed/%s' % video_id embed_webpage = self._download_webpage( embed_url, video_id, 'Downloading embed webpage') jsplayer_url_json = self._search_regex( ASSETS_RE, embed_webpage, 'JS player URL') player_url = json.loads(jsplayer_url_json) if player_url is None: player_url_json = self._search_regex( r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")', video_webpage, 'age gate player URL') 
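                                # The regexes above capture the JSON string literal
                                # itself, quotes and backslash escapes included, so
                                # json.loads() below both strips the quotes and
                                # unescapes the path. An invented illustration:
                                #     '"\\/yts\\/jsbin\\/player-en_US\\/base.js"'
                                # decodes to '/yts/jsbin/player-en_US/base.js'.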
player_url = json.loads(player_url_json) if 'sig' in url_data: url += '&signature=' + url_data['sig'][0] elif 's' in url_data: encrypted_sig = url_data['s'][0] if self._downloader.params.get('verbose'): if player_url is None: player_desc = 'unknown' else: player_type, player_version = self._extract_player_info(player_url) player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version) parts_sizes = self._signature_cache_id(encrypted_sig) self.to_screen('{%s} signature length %s, %s' % (format_id, parts_sizes, player_desc)) signature = self._decrypt_signature( encrypted_sig, video_id, player_url, age_gate) sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature' url += '&%s=%s' % (sp, signature) if 'ratebypass' not in url: url += '&ratebypass=yes' dct = { 'format_id': format_id, 'url': url, 'player_url': player_url, } if format_id in self._formats: dct.update(self._formats[format_id]) if format_id in formats_spec: dct.update(formats_spec[format_id]) # Some itags are not included in DASH manifest thus corresponding formats will # lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993). # Trying to extract metadata from url_encoded_fmt_stream_map entry. mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0]) width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None) if width is None: width = int_or_none(fmt.get('width')) if height is None: height = int_or_none(fmt.get('height')) filesize = int_or_none(url_data.get( 'clen', [None])[0]) or _extract_filesize(url) quality = url_data.get('quality', [None])[0] or fmt.get('quality') quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel') tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000) or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps')) more_fields = { 'filesize': filesize, 'tbr': tbr, 'width': width, 'height': height, 'fps': fps, 'format_note': quality_label or quality, } for key, value in more_fields.items(): if value: dct[key] = value type_ = url_data.get('type', [None])[0] or fmt.get('mimeType') if type_: type_split = type_.split(';') kind_ext = type_split[0].split('/') if len(kind_ext) == 2: kind, _ = kind_ext dct['ext'] = mimetype2ext(type_split[0]) if kind in ('audio', 'video'): codecs = None for mobj in re.finditer( r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_): if mobj.group('key') == 'codecs': codecs = mobj.group('val') break if codecs: dct.update(parse_codecs(codecs)) if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none': dct['downloader_options'] = { # Youtube throttles chunks >~10M 'http_chunk_size': 10485760, } formats.append(dct) else: manifest_url = ( url_or_none(try_get( player_response, lambda x: x['streamingData']['hlsManifestUrl'], compat_str)) or url_or_none(try_get( video_info, lambda x: x['hlsvp'][0], compat_str))) if manifest_url: formats = [] m3u8_formats = self._extract_m3u8_formats( manifest_url, video_id, 'mp4', fatal=False) for a_format in m3u8_formats: itag = self._search_regex( r'/itag/(\d+)/', a_format['url'], 'itag', default=None) if itag: a_format['format_id'] = itag if itag in self._formats: dct = self._formats[itag].copy() dct.update(a_format) a_format = dct a_format['player_url'] = player_url # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming 
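                    # (A hedged sketch of the effect: each HLS format dict ends up
                    # carrying {'http_headers': {'Youtubedl-no-compression': 'True'}},
                    # an internal marker the HTTP handler in utils treats as "do not
                    # send an Accept-Encoding header" for this request.)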
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True' formats.append(a_format) else: error_message = extract_unavailable_message() if not error_message: error_message = clean_html(try_get( player_response, lambda x: x['playabilityStatus']['reason'], compat_str)) if not error_message: error_message = clean_html( try_get(video_info, lambda x: x['reason'][0], compat_str)) if error_message: raise ExtractorError(error_message, expected=True) raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info') # uploader video_uploader = try_get( video_info, lambda x: x['author'][0], compat_str) or str_or_none(video_details.get('author')) if video_uploader: video_uploader = compat_urllib_parse_unquote_plus(video_uploader) else: self._downloader.report_warning('unable to extract uploader name') # uploader_id video_uploader_id = None video_uploader_url = None mobj = re.search( r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">', video_webpage) if mobj is not None: video_uploader_id = mobj.group('uploader_id') video_uploader_url = mobj.group('uploader_url') else: self._downloader.report_warning('unable to extract uploader nickname') channel_id = ( str_or_none(video_details.get('channelId')) or self._html_search_meta( 'channelId', video_webpage, 'channel id', default=None) or self._search_regex( r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1', video_webpage, 'channel id', default=None, group='id')) channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None # thumbnail image # We try first to get a high quality image: m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">', video_webpage, re.DOTALL) if m_thumb is not None: video_thumbnail = m_thumb.group(1) elif 'thumbnail_url' not in video_info: self._downloader.report_warning('unable to extract video thumbnail') video_thumbnail = None else: # don't panic if we can't find it video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0]) # upload date upload_date = self._html_search_meta( 'datePublished', video_webpage, 'upload date', default=None) if not upload_date: upload_date = self._search_regex( [r'(?s)id="eow-date.*?>(.*?)</span>', r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'], video_webpage, 'upload date', default=None) upload_date = unified_strdate(upload_date) video_license = self._html_search_regex( r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li', video_webpage, 'license', default=None) m_music = re.search( r'''(?x) <h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s* <ul[^>]*>\s* <li>(?P<title>.+?) by (?P<creator>.+?) (?: \(.+?\)| <a[^>]* (?: \bhref=["\']/red[^>]*>| # drop possible >\s*Listen ad-free with YouTube Red # YouTube Red ad ) .*? 
                )?</li
            ''', video_webpage)
        if m_music:
            video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
            video_creator = clean_html(m_music.group('creator'))
        else:
            video_alt_title = video_creator = None

        def extract_meta(field):
            return self._html_search_regex(
                r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
                video_webpage, field, default=None)

        track = extract_meta('Song')
        artist = extract_meta('Artist')
        album = extract_meta('Album')

        # Youtube Music Auto-generated description
        release_date = release_year = None
        if video_description:
            mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
            if mobj:
                if not track:
                    track = mobj.group('track').strip()
                if not artist:
                    artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
                if not album:
                    album = mobj.group('album').strip()
                release_year = mobj.group('release_year')
                release_date = mobj.group('release_date')
                if release_date:
                    release_date = release_date.replace('-', '')
                    if not release_year:
                        release_year = int(release_date[:4])
                if release_year:
                    release_year = int(release_year)

        m_episode = re.search(
            r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
            video_webpage)
        if m_episode:
            series = unescapeHTML(m_episode.group('series'))
            season_number = int(m_episode.group('season'))
            episode_number = int(m_episode.group('episode'))
        else:
            series = season_number = episode_number = None

        m_cat_container = self._search_regex(
            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
            video_webpage, 'categories', default=None)
        if m_cat_container:
            category = self._html_search_regex(
                r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
                default=None)
            video_categories = None if category is None else [category]
        else:
            video_categories = None

        video_tags = [
            unescapeHTML(m.group('content'))
            for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]

        def _extract_count(count_name):
            return str_to_int(self._search_regex(
                r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>' % re.escape(count_name),
                video_webpage, count_name, default=None))

        like_count = _extract_count('like')
        dislike_count = _extract_count('dislike')

        if view_count is None:
            view_count = str_to_int(self._search_regex(
                r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
                'view count', default=None))

        average_rating = (
            float_or_none(video_details.get('averageRating'))
            or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, video_webpage)
        automatic_captions = self.extract_automatic_captions(video_id, video_webpage)

        video_duration = try_get(
            video_info, lambda x: int_or_none(x['length_seconds'][0]))
        if not video_duration:
            video_duration = int_or_none(video_details.get('lengthSeconds'))
        if not video_duration:
            video_duration = parse_duration(self._html_search_meta(
                'duration', video_webpage, 'video duration'))

        # annotations
        video_annotations = None
        if self._downloader.params.get('writeannotations', False):
            xsrf_token = self._search_regex(
                r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
                video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
            invideo_url = try_get(
                player_response,
lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str) if xsrf_token and invideo_url: xsrf_field_name = self._search_regex( r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2', video_webpage, 'xsrf field name', group='xsrf_field_name', default='session_token') video_annotations = self._download_webpage( self._proto_relative_url(invideo_url), video_id, note='Downloading annotations', errnote='Unable to download video annotations', fatal=False, data=urlencode_postdata({xsrf_field_name: xsrf_token})) chapters = self._extract_chapters(description_original, video_duration) # Look for the DASH manifest if self._downloader.params.get('youtube_include_dash_manifest', True): dash_mpd_fatal = True for mpd_url in dash_mpds: dash_formats = {} try: def decrypt_sig(mobj): s = mobj.group(1) dec_s = self._decrypt_signature(s, video_id, player_url, age_gate) return '/signature/%s' % dec_s mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url) for df in self._extract_mpd_formats( mpd_url, video_id, fatal=dash_mpd_fatal, formats_dict=self._formats): if not df.get('filesize'): df['filesize'] = _extract_filesize(df['url']) # Do not overwrite DASH format found in some previous DASH manifest if df['format_id'] not in dash_formats: dash_formats[df['format_id']] = df # Additional DASH manifests may end up in HTTP Error 403 therefore # allow them to fail without bug report message if we already have # some DASH manifest succeeded. This is temporary workaround to reduce # burst of bug reports until we figure out the reason and whether it # can be fixed at all. dash_mpd_fatal = False except (ExtractorError, KeyError) as e: self.report_warning( 'Skipping DASH manifest: %r' % e, video_id) if dash_formats: # Remove the formats we found through non-DASH, they # contain less info and it can be wrong, because we use # fixed values (for example the resolution). See # https://github.com/ytdl-org/youtube-dl/issues/5774 for an # example. formats = [f for f in formats if f['format_id'] not in dash_formats.keys()] formats.extend(dash_formats.values()) # Check for malformed aspect ratio stretched_m = re.search( r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">', video_webpage) if stretched_m: w = float(stretched_m.group('w')) h = float(stretched_m.group('h')) # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0). # We will only process correct ratios. if w > 0 and h > 0: ratio = w / h for f in formats: if f.get('vcodec') != 'none': f['stretched_ratio'] = ratio if not formats: if 'reason' in video_info: if 'The uploader has not made this video available in your country.' 
in video_info['reason']: regions_allowed = self._html_search_meta( 'regionsAllowed', video_webpage, default=None) countries = regions_allowed.split(',') if regions_allowed else None self.raise_geo_restricted( msg=video_info['reason'][0], countries=countries) reason = video_info['reason'][0] if 'Invalid parameters' in reason: unavailable_message = extract_unavailable_message() if unavailable_message: reason = unavailable_message raise ExtractorError( 'YouTube said: %s' % reason, expected=True, video_id=video_id) if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']): raise ExtractorError('This video is DRM protected.', expected=True) self._sort_formats(formats) self.mark_watched(video_id, video_info, player_response) return { 'id': video_id, 'uploader': video_uploader, 'uploader_id': video_uploader_id, 'uploader_url': video_uploader_url, 'channel_id': channel_id, 'channel_url': channel_url, 'upload_date': upload_date, 'license': video_license, 'creator': video_creator or artist, 'title': video_title, 'alt_title': video_alt_title or track, 'thumbnail': video_thumbnail, 'description': video_description, 'categories': video_categories, 'tags': video_tags, 'subtitles': video_subtitles, 'automatic_captions': automatic_captions, 'duration': video_duration, 'age_limit': 18 if age_gate else 0, 'annotations': video_annotations, 'chapters': chapters, 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'average_rating': average_rating, 'formats': formats, 'is_live': is_live, 'start_time': start_time, 'end_time': end_time, 'series': series, 'season_number': season_number, 'episode_number': episode_number, 'track': track, 'artist': artist, 'album': album, 'release_date': release_date, 'release_year': release_year, } class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com playlists' _VALID_URL = r"""(?x)(?: (?:https?://)? (?:\w+\.)? (?: (?: youtube(?:kids)?\.com| invidio\.us ) / (?: (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11})) \? (?:.*?[&;])*? (?:p|a|list)= | p/ )| youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist= ) ( (?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,} # Top tracks, they can also include dots |(?:MC)[\w\.]* ) .* | (%(playlist_id)s) )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE} _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s' _VIDEO_RE_TPL = r'href="\s*/watch\?v=%s(?:&amp;(?:[^"]*?index=(?P<index>\d+))?(?:[^>]+>(?P<title>[^<]+))?)?' _VIDEO_RE = _VIDEO_RE_TPL % r'(?P<id>[0-9A-Za-z_-]{11})' IE_NAME = 'youtube:playlist' _TESTS = [{ 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'info_dict': { 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', 'uploader': 'Sergey M.', 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'title': 'youtube-dl public playlist', }, 'playlist_count': 1, }, { 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf', 'info_dict': { 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', 'uploader': 'Sergey M.', 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf', 'title': 'youtube-dl empty playlist', }, 'playlist_count': 0, }, { 'note': 'Playlist with deleted videos (#651). 
As a bonus, the video #51 is also twice in this list.', 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC', 'info_dict': { 'title': '29C3: Not my department', 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC', 'uploader': 'Christiaan008', 'uploader_id': 'ChRiStIaAn008', }, 'playlist_count': 96, }, { 'note': 'issue #673', 'url': 'PLBB231211A4F62143', 'info_dict': { 'title': '[OLD]Team Fortress 2 (Class-based LP)', 'id': 'PLBB231211A4F62143', 'uploader': 'Wickydoo', 'uploader_id': 'Wickydoo', }, 'playlist_mincount': 26, }, { 'note': 'Large playlist', 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q', 'info_dict': { 'title': 'Uploads from Cauchemar', 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q', 'uploader': 'Cauchemar', 'uploader_id': 'Cauchemar89', }, 'playlist_mincount': 799, }, { 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl', 'info_dict': { 'title': 'YDL_safe_search', 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl', }, 'playlist_count': 2, 'skip': 'This playlist is private', }, { 'note': 'embedded', 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu', 'playlist_count': 4, 'info_dict': { 'title': 'JODA15', 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu', 'uploader': 'milan', 'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw', } }, { 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl', 'playlist_mincount': 485, 'info_dict': { 'title': '2018 Chinese New Singles (11/6 updated)', 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl', 'uploader': 'LBK', 'uploader_id': 'sdragonfang', } }, { 'note': 'Embedded SWF player', 'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0', 'playlist_count': 4, 'info_dict': { 'title': 'JODA7', 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ', }, 'skip': 'This playlist does not exist', }, { 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos', 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA', 'info_dict': { 'title': 'Uploads from Interstellar Movie', 'id': 'UUXw-G3eDE9trcvY2sBMM_aA', 'uploader': 'Interstellar Movie', 'uploader_id': 'InterstellarMovie1', }, 'playlist_mincount': 21, }, { # Playlist URL that does not actually serve a playlist 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4', 'info_dict': { 'id': 'FqZTN594JQw', 'ext': 'webm', 'title': "Smiley's People 01 detective, Adventure Series, Action", 'uploader': 'STREEM', 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng', 'upload_date': '20150526', 'license': 'Standard YouTube License', 'description': 'md5:507cdcb5a49ac0da37a920ece610be80', 'categories': ['People & Blogs'], 'tags': list, 'view_count': int, 'like_count': int, 'dislike_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'This video is not available.', 'add_ie': [YoutubeIE.ie_key()], }, { 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5', 'info_dict': { 'id': 'yeWKywCrFtk', 'ext': 'mp4', 'title': 'Small Scale Baler and Braiding Rugs', 'uploader': 'Backus-Page House Museum', 'uploader_id': 'backuspagemuseum', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum', 'upload_date': '20161008', 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a', 'categories': ['Nonprofits & Activism'], 'tags': list, 'like_count': int, 'dislike_count': int, }, 'params': { 'noplaylist': True, 'skip_download': True, }, 
}, { # https://github.com/ytdl-org/youtube-dl/issues/21844 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba', 'info_dict': { 'title': 'Data Analysis with Dr Mike Pound', 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba', 'uploader_id': 'Computerphile', 'uploader': 'Computerphile', }, 'playlist_mincount': 11, }, { 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21', 'only_matching': True, }, { 'url': 'TLGGrESM50VT6acwMjAyMjAxNw', 'only_matching': True, }, { # music album playlist 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM', 'only_matching': True, }, { 'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU', 'only_matching': True, }, { 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g', 'only_matching': True, }] def _real_initialize(self): self._login() def extract_videos_from_page(self, page): ids_in_page = [] titles_in_page = [] for item in re.findall( r'(<[^>]*\bdata-video-id\s*=\s*["\'][0-9A-Za-z_-]{11}[^>]+>)', page): attrs = extract_attributes(item) video_id = attrs['data-video-id'] video_title = unescapeHTML(attrs.get('data-title')) if video_title: video_title = video_title.strip() ids_in_page.append(video_id) titles_in_page.append(video_title) # Fallback with old _VIDEO_RE self.extract_videos_from_page_impl( self._VIDEO_RE, page, ids_in_page, titles_in_page) # Relaxed fallbacks self.extract_videos_from_page_impl( r'href="\s*/watch\?v\s*=\s*(?P<id>[0-9A-Za-z_-]{11})', page, ids_in_page, titles_in_page) self.extract_videos_from_page_impl( r'data-video-ids\s*=\s*["\'](?P<id>[0-9A-Za-z_-]{11})', page, ids_in_page, titles_in_page) return zip(ids_in_page, titles_in_page) def _extract_mix(self, playlist_id): # The mixes are generated from a single video # the id of the playlist is just 'RD' + video_id ids = [] last_id = playlist_id[-11:] for n in itertools.count(1): url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id) webpage = self._download_webpage( url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n)) new_ids = orderedSet(re.findall( r'''(?xs)data-video-username=".*?".*? href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id), webpage)) # Fetch new pages until all the videos are repeated, it seems that # there are always 51 unique videos. new_ids = [_id for _id in new_ids if _id not in ids] if not new_ids: break ids.extend(new_ids) last_id = ids[-1] url_results = self._ids_to_results(ids) search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage) title_span = ( search_title('playlist-title') or search_title('title long-title') or search_title('title')) title = clean_html(title_span) return self.playlist_result(url_results, playlist_id, title) def _extract_playlist(self, playlist_id): url = self._TEMPLATE_URL % playlist_id page = self._download_webpage(url, playlist_id) # the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604) for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page): match = match.strip() # Check if the playlist exists or is private mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match) if mobj: reason = mobj.group('reason') message = 'This playlist %s' % reason if 'private' in reason: message += ', use --username or --netrc to access it' message += '.' 
raise ExtractorError(message, expected=True) elif re.match(r'[^<]*Invalid parameters[^<]*', match): raise ExtractorError( 'Invalid parameters. Maybe URL is incorrect.', expected=True) elif re.match(r'[^<]*Choose your language[^<]*', match): continue else: self.report_warning('Youtube gives an alert message: ' + match) playlist_title = self._html_search_regex( r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>', page, 'title', default=None) _UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref=' uploader = self._html_search_regex( r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE, page, 'uploader', default=None) mobj = re.search( r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE, page) if mobj: uploader_id = mobj.group('uploader_id') uploader_url = compat_urlparse.urljoin(url, mobj.group('path')) else: uploader_id = uploader_url = None has_videos = True if not playlist_title: try: # Some playlist URLs don't actually serve a playlist (e.g. # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4) next(self._entries(page, playlist_id)) except StopIteration: has_videos = False playlist = self.playlist_result( self._entries(page, playlist_id), playlist_id, playlist_title) playlist.update({ 'uploader': uploader, 'uploader_id': uploader_id, 'uploader_url': uploader_url, }) return has_videos, playlist def _check_download_just_video(self, url, playlist_id): # Check if it's a video-specific URL query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) video_id = query_dict.get('v', [None])[0] or self._search_regex( r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url, 'video id', default=None) if video_id: if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just video %s because of --no-playlist' % video_id) return video_id, self.url_result(video_id, 'Youtube', video_id=video_id) else: self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id)) return video_id, None return None, None def _real_extract(self, url): # Extract playlist id mobj = re.match(self._VALID_URL, url) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) playlist_id = mobj.group(1) or mobj.group(2) video_id, video = self._check_download_just_video(url, playlist_id) if video: return video if playlist_id.startswith(('RD', 'UL', 'PU')): # Mixes require a custom extraction process return self._extract_mix(playlist_id) has_videos, playlist = self._extract_playlist(playlist_id) if has_videos or not video_id: return playlist # Some playlist URLs don't actually serve a playlist (see # https://github.com/ytdl-org/youtube-dl/issues/10537). # Fallback to plain video extraction if there is a video id # along with playlist id. return self.url_result(video_id, 'Youtube', video_id=video_id) class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com channels' _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie|kids)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)' _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos' _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?' 
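    # A hedged illustration of what _VIDEO_RE is meant to match (the markup is
    # invented for the example): given
    #     <a title="Some upload" href="/watch?v=dQw4w9WgXcQ&feature=em">
    # the match yields group('id') == 'dQw4w9WgXcQ' and
    # group('title') == 'Some upload'; the title part is optional, so a bare
    # href="/watch?v=..." still yields an id with title None.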
IE_NAME = 'youtube:channel' _TESTS = [{ 'note': 'paginated channel', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'playlist_mincount': 91, 'info_dict': { 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w', 'title': 'Uploads from lex will', 'uploader': 'lex will', 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', } }, { 'note': 'Age restricted channel', # from https://www.youtube.com/user/DeusExOfficial 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w', 'playlist_mincount': 64, 'info_dict': { 'id': 'UUs0ifCMCm1icqRbqhUINa0w', 'title': 'Uploads from Deus Ex', 'uploader': 'Deus Ex', 'uploader_id': 'DeusExOfficial', }, }, { 'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA', 'only_matching': True, }, { 'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url) else super(YoutubeChannelIE, cls).suitable(url)) def _build_template_url(self, url, channel_id): return self._TEMPLATE_URL % channel_id def _real_extract(self, url): channel_id = self._match_id(url) url = self._build_template_url(url, channel_id) # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778) # Workaround by extracting as a playlist if managed to obtain channel playlist URL # otherwise fallback on channel by page extraction channel_page = self._download_webpage( url + '?view=57', channel_id, 'Downloading channel page', fatal=False) if channel_page is False: channel_playlist_id = False else: channel_playlist_id = self._html_search_meta( 'channelId', channel_page, 'channel id', default=None) if not channel_playlist_id: channel_url = self._html_search_meta( ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'), channel_page, 'channel url', default=None) if channel_url: channel_playlist_id = self._search_regex( r'vnd\.youtube://user/([0-9A-Za-z_-]+)', channel_url, 'channel id', default=None) if channel_playlist_id and channel_playlist_id.startswith('UC'): playlist_id = 'UU' + channel_playlist_id[2:] return self.url_result( compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist') channel_page = self._download_webpage(url, channel_id, 'Downloading page #1') autogenerated = re.search(r'''(?x) class="[^"]*?(?: channel-header-autogenerated-label| yt-channel-title-autogenerated )[^"]*"''', channel_page) is not None if autogenerated: # The videos are contained in a single page # the ajax pages can't be used, they are empty entries = [ self.url_result( video_id, 'Youtube', video_id=video_id, video_title=video_title) for video_id, video_title in self.extract_videos_from_page(channel_page)] return self.playlist_result(entries, channel_id) try: next(self._entries(channel_page, channel_id)) except StopIteration: alert_message = self._html_search_regex( r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>', channel_page, 'alert', default=None, group='alert') if alert_message: raise ExtractorError('Youtube said: %s' % alert_message, expected=True) return self.playlist_result(self._entries(channel_page, channel_id), channel_id) class YoutubeUserIE(YoutubeChannelIE): IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)' _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)' _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos' 
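    # A hedged illustration of how _build_template_url (defined below) fills
    # the two %s slots: for 'ytuser:phihag' the 'user' group is absent and
    # falls back to 'user', giving 'https://www.youtube.com/user/phihag/videos';
    # for 'https://www.youtube.com/c/gametrailers' the matched 'c' is kept,
    # giving 'https://www.youtube.com/c/gametrailers/videos'.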
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
            'title': 'Uploads from The Linux Foundation',
            'uploader': 'The Linux Foundation',
            'uploader_id': 'TheLinuxFoundation',
        }
    }, {
        # Only available via https://www.youtube.com/c/12minuteathlete/videos
        # but not https://www.youtube.com/user/12minuteathlete/videos
        'url': 'https://www.youtube.com/c/12minuteathlete/videos',
        'playlist_mincount': 249,
        'info_dict': {
            'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
            'title': 'Uploads from 12 Minute Athlete',
            'uploader': '12 Minute Athlete',
            'uploader_id': 'the12minuteathlete',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/c/gametrailers',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/gametrailers',
        'only_matching': True,
    }, {
        # This channel is not available, geo restricted to JP
        'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with other youtube
        # extractor, the regex is too permissive and it would match.
        other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_yt_ies):
            return False
        else:
            return super(YoutubeUserIE, cls).suitable(url)

    def _build_template_url(self, url, channel_id):
        mobj = re.match(self._VALID_URL, url)
        return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))


class YoutubeLiveIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com live streams'
    _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
    IE_NAME = 'youtube:live'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheYoungTurks/live',
        'info_dict': {
            'id': 'a48o2S1cPoo',
            'ext': 'mp4',
            'title': 'The Young Turks - Live Main Show',
            'uploader': 'The Young Turks',
            'uploader_id': 'TheYoungTurks',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
            'upload_date': '20150715',
            'license': 'Standard YouTube License',
            'description': 'md5:438179573adcdff3c97ebb1ee632b891',
            'categories': ['News & Politics'],
            'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/TheYoungTurks/live',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        base_url = mobj.group('base_url')
        webpage = self._download_webpage(url, channel_id, fatal=False)
        if webpage:
            page_type = self._og_search_property(
                'type', webpage, 'page type', default='')
            video_id = self._html_search_meta(
                'videoId', webpage, 'video id', default=None)
            if page_type.startswith('video') and video_id and re.match(
                    r'^[0-9A-Za-z_-]{11}$', video_id):
                return self.url_result(video_id, YoutubeIE.ie_key())
        return self.url_result(base_url)


class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
    IE_DESC = 'YouTube.com user/channel playlists'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
    IE_NAME = 'youtube:playlists'

    _TESTS = [{
        'url':
'https://www.youtube.com/user/ThirstForScience/playlists', 'playlist_mincount': 4, 'info_dict': { 'id': 'ThirstForScience', 'title': 'ThirstForScience', }, }, { # with "Load more" button 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd', 'playlist_mincount': 70, 'info_dict': { 'id': 'igorkle1', 'title': 'Игорь Клейнер', }, }, { 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists', 'playlist_mincount': 17, 'info_dict': { 'id': 'UCiU1dHvZObB2iP6xkJ__Icw', 'title': 'Chem Player', }, 'skip': 'Blocked', }] class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor): _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?' class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor): IE_DESC = 'YouTube.com searches' # there doesn't appear to be a real limit, for example if you search for # 'python' you get more than 8.000.000 results _MAX_RESULTS = float('inf') IE_NAME = 'youtube:search' _SEARCH_KEY = 'ytsearch' _EXTRA_QUERY_ARGS = {} _TESTS = [] def _get_n_results(self, query, n): """Get a specified number of results for a query""" videos = [] limit = n url_query = { 'search_query': query.encode('utf-8'), } url_query.update(self._EXTRA_QUERY_ARGS) result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query) for pagenum in itertools.count(1): data = self._download_json( result_url, video_id='query "%s"' % query, note='Downloading page %s' % pagenum, errnote='Unable to download API page', query={'spf': 'navigate'}) html_content = data[1]['body']['content'] if 'class="search-message' in html_content: raise ExtractorError( '[youtube] No video results', expected=True) new_videos = list(self._process_page(html_content)) videos += new_videos if not new_videos or len(videos) > limit: break next_link = self._html_search_regex( r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next', html_content, 'next link', default=None) if next_link is None: break result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link) if len(videos) > n: videos = videos[:n] return self.playlist_result(videos, query) class YoutubeSearchDateIE(YoutubeSearchIE): IE_NAME = YoutubeSearchIE.IE_NAME + ':date' _SEARCH_KEY = 'ytsearchdate' IE_DESC = 'YouTube.com searches, newest videos first' _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'} class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor): IE_DESC = 'YouTube.com search URLs' IE_NAME = 'youtube:search_url' _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)' _TESTS = [{ 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', 'playlist_mincount': 5, 'info_dict': { 'title': 'youtube-dl test video', } }, { 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) query = compat_urllib_parse_unquote_plus(mobj.group('query')) webpage = self._download_webpage(url, query) return self.playlist_result(self._process_page(webpage), playlist_title=query) class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor): IE_DESC = 'YouTube.com (multi-season) shows' _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)' IE_NAME = 'youtube:show' _TESTS = [{ 'url': 'https://www.youtube.com/show/airdisasters', 'playlist_mincount': 5, 'info_dict': { 'id': 'airdisasters', 
'title': 'Air Disasters', } }] def _real_extract(self, url): playlist_id = self._match_id(url) return super(YoutubeShowIE, self)._real_extract( 'https://www.youtube.com/show/%s/playlists' % playlist_id) class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor): """ Base class for feed extractors Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties. """ _LOGIN_REQUIRED = True @property def IE_NAME(self): return 'youtube:%s' % self._FEED_NAME def _real_initialize(self): self._login() def _entries(self, page): # The extraction process is the same as for playlists, but the regex # for the video ids doesn't contain an index ids = [] more_widget_html = content_html = page for page_num in itertools.count(1): matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html) # 'recommended' feed has infinite 'load more' and each new portion spins # the same videos in (sometimes) slightly different order, so we'll check # for unicity and break when portion has no new videos new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches))) if not new_ids: break ids.extend(new_ids) for entry in self._ids_to_results(new_ids): yield entry mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) if not mobj: break more = self._download_json( 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE, 'Downloading page #%s' % page_num, transform_source=uppercase_escape) content_html = more['content_html'] more_widget_html = more['load_more_widget_html'] def _real_extract(self, url): page = self._download_webpage( 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE) return self.playlist_result( self._entries(page), playlist_title=self._PLAYLIST_TITLE) class YoutubeWatchLaterIE(YoutubePlaylistIE): IE_NAME = 'youtube:watchlater' IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater' _TESTS = [{ 'url': 'https://www.youtube.com/playlist?list=WL', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL', 'only_matching': True, }] def _real_extract(self, url): _, video = self._check_download_just_video(url, 'WL') if video: return video _, playlist = self._extract_playlist('WL') return playlist class YoutubeFavouritesIE(YoutubeBaseInfoExtractor): IE_NAME = 'youtube:favorites' IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?' _LOGIN_REQUIRED = True def _real_extract(self, url): webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos') playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id') return self.url_result(playlist_id, 'YoutubePlaylist') class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor): IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?' _FEED_NAME = 'recommended' _PLAYLIST_TITLE = 'Youtube Recommended videos' class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor): IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?' 
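    # Note: both 'https://www.youtube.com/feed/subscriptions' and the ':ytsubs'
    # shorthand match _VALID_URL above; the IE_NAME property inherited from
    # YoutubeFeedsInfoExtractor renders as 'youtube:subscriptions' based on
    # _FEED_NAME below.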
_FEED_NAME = 'subscriptions' _PLAYLIST_TITLE = 'Youtube Subscriptions' class YoutubeHistoryIE(YoutubeFeedsInfoExtractor): IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory' _FEED_NAME = 'history' _PLAYLIST_TITLE = 'Youtube History' class YoutubeTruncatedURLIE(InfoExtractor): IE_NAME = 'youtube:truncated_url' IE_DESC = False # Do not list _VALID_URL = r'''(?x) (?:https?://)? (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/ (?:watch\?(?: feature=[a-z_]+| annotation_id=annotation_[^&]+| x-yt-cl=[0-9]+| hl=[^&]*| t=[0-9]+ )? | attribution_link\?a=[^&]+ ) $ ''' _TESTS = [{ 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?feature=foo', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?hl=en-GB', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?t=2372', 'only_matching': True, }] def _real_extract(self, url): raise ExtractorError( 'Did you forget to quote the URL? Remember that & is a meta ' 'character in most shells, so you want to put the URL in quotes, ' 'like youtube-dl ' '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" ' ' or simply youtube-dl BaW_jenozKc .', expected=True) class YoutubeTruncatedIDIE(InfoExtractor): IE_NAME = 'youtube:truncated_id' IE_DESC = False # Do not list _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$' _TESTS = [{ 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) raise ExtractorError( 'Incomplete YouTube ID %s. URL %s looks truncated.' 
% (video_id, url), expected=True) # coding: utf-8 from __future__ import unicode_literals import itertools import json import os.path import random import re import time import traceback from .common import InfoExtractor, SearchInfoExtractor from ..jsinterp import JSInterpreter from ..swfinterp import SWFInterpreter from ..compat import ( compat_chr, compat_HTTPError, compat_kwargs, compat_parse_qs, compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urlparse, compat_str, ) from ..utils import ( bool_or_none, clean_html, error_to_compat_str, extract_attributes, ExtractorError, float_or_none, get_element_by_attribute, get_element_by_id, int_or_none, mimetype2ext, orderedSet, parse_codecs, parse_duration, remove_quotes, remove_start, smuggle_url, str_or_none, str_to_int, try_get, unescapeHTML, unified_strdate, unsmuggle_url, uppercase_escape, url_or_none, urlencode_postdata, ) class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge' _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup' _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge' _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}' _NETRC_MACHINE = 'youtube' # If True it will raise an error if no login info is provided _LOGIN_REQUIRED = False _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}' def _set_language(self): self._set_cookie( '.youtube.com', 'PREF', 'f1=50000000&hl=en', # YouTube sets the expire time to about two months expire_time=time.time() + 2 * 30 * 24 * 3600) def _ids_to_results(self, ids): return [ self.url_result(vid_id, 'Youtube', video_id=vid_id) for vid_id in ids] def _login(self): """ Attempt to log in to YouTube. True is returned if successful or skipped. False is returned if login failed. If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised. """ username, password = self._get_login_info() # No authentication to be performed if username is None: if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None: raise ExtractorError('No login info available, needed for using %s.' 
                                     % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return

        login_form = self._hidden_inputs(login_page)

        def req(url, f_req, note, errnote):
            data = login_form.copy()
            data.update({
                'pstMsg': 1,
                'checkConnection': 'youtube',
                'checkedDomains': 'youtube',
                'hl': 'en',
                'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
                'f.req': json.dumps(f_req),
                'flowName': 'GlifWebSignIn',
                'flowEntry': 'ServiceLogin',
                # TODO: reverse actual botguard identifier generation algo
                'bgRequest': '["identifier",""]',
            })
            return self._download_json(
                url, None, note=note, errnote=errnote,
                transform_source=lambda s: re.sub(r'^[^[]*', '', s),
                fatal=False,
                data=urlencode_postdata(data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
                    'Google-Accounts-XSRF': 1,
                })

        def warn(message):
            self._downloader.report_warning(message)

        lookup_req = [
            username,
            None, [], None, 'US', None, None, 2, False, True,
            [
                None, None,
                [2, 1, None, 1,
                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                 None, [], 4],
                1, [None, None, []], None, None, None, True
            ],
            username,
        ]

        lookup_results = req(
            self._LOOKUP_URL, lookup_req,
            'Looking up account info', 'Unable to look up account info')

        if lookup_results is False:
            return False

        user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
        if not user_hash:
            warn('Unable to extract user hash')
            return False

        challenge_req = [
            user_hash,
            None, 1, None, [1, None, None, None, [password, None, True]],
            [
                None, None,
                [2, 1, None, 1,
                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                 None, [], 4],
                1, [None, None, []], None, None, None, True
            ]]

        challenge_results = req(
            self._CHALLENGE_URL, challenge_req,
            'Logging in', 'Unable to log in')

        if challenge_results is False:
            return

        login_res = try_get(challenge_results, lambda x: x[0][5], list)
        if login_res:
            login_msg = try_get(login_res, lambda x: x[5], compat_str)
            warn('Unable to login: %s' % (
                'Invalid password' if login_msg == 'INCORRECT_ANSWER_ENTERED'
                else login_msg))
            return False

        res = try_get(challenge_results, lambda x: x[0][-1], list)
        if not res:
            warn('Unable to extract result entry')
            return False

        login_challenge = try_get(res, lambda x: x[0][0], list)
        if login_challenge:
            challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
            if challenge_str == 'TWO_STEP_VERIFICATION':
                # SEND_SUCCESS - TFA code has been successfully sent to phone
                # QUOTA_EXCEEDED - reached the limit of TFA codes
                status = try_get(login_challenge, lambda x: x[5], compat_str)
                if status == 'QUOTA_EXCEEDED':
                    warn('Exceeded the limit of TFA codes, try later')
                    return False

                tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
                if not tl:
                    warn('Unable to extract TL')
                    return False

                tfa_code = self._get_tfa_info('2-step verification code')

                if not tfa_code:
                    warn(
                        'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
                        '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                    return False

                tfa_code = remove_start(tfa_code, 'G-')

                tfa_req = [
                    user_hash, None, 2, None,
                    [
                        9, None, None, None, None, None, None, None,
                        [None, tfa_code, True, 2]
                    ]]

                tfa_results = req(
                    self._TFA_URL.format(tl), tfa_req,
                    'Submitting TFA code', 'Unable to submit TFA code')

                if tfa_results is False:
                    return False

                tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
                if tfa_res:
                    tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
                    warn('Unable to finish TFA: %s' % (
                        'Invalid TFA code' if tfa_msg == 'INCORRECT_ANSWER_ENTERED'
                        else tfa_msg))
                    return False

                check_cookie_url = try_get(
                    tfa_results, lambda x: x[0][-1][2], compat_str)
            else:
                CHALLENGES = {
                    'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
                    'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
                    'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
                }
                challenge = CHALLENGES.get(
                    challenge_str,
                    '%s returned error %s.' % (self.IE_NAME, challenge_str))
                warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
                return False
        else:
            check_cookie_url = try_get(res, lambda x: x[2], compat_str)

        if not check_cookie_url:
            warn('Unable to extract CheckCookie URL')
            return False

        check_cookie_results = self._download_webpage(
            check_cookie_url, None, 'Checking cookie', fatal=False)

        if check_cookie_results is False:
            return False

        if 'https://myaccount.google.com/' not in check_cookie_results:
            warn('Unable to log in')
            return False

        return True

    def _download_webpage_handle(self, *args, **kwargs):
        query = kwargs.get('query', {}).copy()
        query['disable_polymer'] = 'true'
        kwargs['query'] = query
        return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
            *args, **compat_kwargs(kwargs))

    def _real_initialize(self):
        if self._downloader is None:
            return
        self._set_language()
        if not self._login():
            return


class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
    # Extract entries from page with "Load more" button
    def _entries(self, page, playlist_id):
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            for entry in self._process_page(content_html):
                yield entry

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
                             more_widget_html)
            if not mobj:
                break

            count = 0
            retries = 3
            while count <= retries:
                try:
                    # Downloading page may result in intermittent 5xx HTTP error
                    # that is usually worked around with a retry
                    more = self._download_json(
                        'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                        'Downloading page #%s%s'
                        % (page_num, ' (retry #%d)' % count if count else ''),
                        transform_source=uppercase_escape)
                    break
                except ExtractorError as e:
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
                        count += 1
                        if count <= retries:
                            continue
                    raise

            content_html = more['content_html']
            if not content_html.strip():
                # Some webpages show a "Load more" button but they don't
                # have more videos
                break
            more_widget_html = more['load_more_widget_html']


class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        for video_id, video_title in self.extract_videos_from_page(content):
            yield self.url_result(video_id, 'Youtube', video_id, video_title)

    def extract_videos_from_page_impl(self, video_re, page, ids_in_page, titles_in_page):
        for mobj in
re.finditer(video_re, page): # The link with index 0 is not the first video of the playlist (not sure if still actual) if 'index' in mobj.groupdict() and mobj.group('id') == '0': continue video_id = mobj.group('id') video_title = unescapeHTML( mobj.group('title')) if 'title' in mobj.groupdict() else None if video_title: video_title = video_title.strip() if video_title == '► Play all': video_title = None try: idx = ids_in_page.index(video_id) if video_title and not titles_in_page[idx]: titles_in_page[idx] = video_title except ValueError: ids_in_page.append(video_id) titles_in_page.append(video_title) def extract_videos_from_page(self, page): ids_in_page = [] titles_in_page = [] self.extract_videos_from_page_impl( self._VIDEO_RE, page, ids_in_page, titles_in_page) return zip(ids_in_page, titles_in_page) class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor): def _process_page(self, content): for playlist_id in orderedSet(re.findall( r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"', content)): yield self.url_result( 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist') def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) title = self._og_search_title(webpage, fatal=False) return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title) class YoutubeIE(YoutubeBaseInfoExtractor): IE_DESC = 'YouTube.com' _VALID_URL = r"""(?x)^ ( (?:https?://|//) # http(s):// or protocol-independent URL (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/| (?:www\.)?deturl\.com/www\.youtube\.com/| (?:www\.)?pwnyoutube\.com/| (?:www\.)?hooktube\.com/| (?:www\.)?yourepeat\.com/| tube\.majestyc\.net/| # Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances (?:(?:www|dev)\.)?invidio\.us/| (?:(?:www|no)\.)?invidiou\.sh/| (?:(?:www|fi|de)\.)?invidious\.snopyta\.org/| (?:www\.)?invidious\.kabi\.tk/| (?:www\.)?invidious\.13ad\.de/| (?:www\.)?invidious\.mastodon\.host/| (?:www\.)?invidious\.nixnet\.xyz/| (?:www\.)?invidious\.drycat\.fr/| (?:www\.)?tube\.poal\.co/| (?:www\.)?vid\.wxzm\.sx/| (?:www\.)?yewtu\.be/| (?:www\.)?yt\.elukerio\.org/| (?:www\.)?yt\.lelux\.fi/| (?:www\.)?invidious\.ggc-project\.de/| (?:www\.)?yt\.maisputain\.ovh/| (?:www\.)?invidious\.13ad\.de/| (?:www\.)?invidious\.toot\.koeln/| (?:www\.)?invidious\.fdn\.fr/| (?:www\.)?watch\.nettohikari\.com/| (?:www\.)?kgg2m7yk5aybusll\.onion/| (?:www\.)?qklhadlycap4cnod\.onion/| (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/| (?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/| (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/| (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/| (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/| (?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/| youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/ |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! (?:.*?[&;])?? 
# any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY) v= ) )) |(?: youtu\.be| # just youtu.be/xxxx vid\.plus| # or vid.plus/xxxx zwearz\.com/watch| # or zwearz.com/watch/xxxx )/ |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId= ) )? # all until now is optional -> you can pass the naked ID ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID (?!.*?\blist= (?: %(playlist_id)s| # combined list/video URLs are handled by the playlist IE WL # WL are handled by the watch later IE ) ) (?(1).+)? # if we found the ID, everything can follow $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE} _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' _PLAYER_INFO_RE = ( r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$', r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$', ) _formats = { '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'}, '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'}, '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'}, '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'}, '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'}, '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'}, '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'}, '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'}, '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'}, '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'}, '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'}, '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'}, '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'}, '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'}, # 3D videos '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20}, '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20}, '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20}, '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20}, '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20}, '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20}, '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20}, # Apple HTTP Live Streaming '91': {'ext': 'mp4', 'height': 144, 
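        # Note: itags 91-96, 132 and 151 in this block are Apple HLS variants,
        # served mostly for live streams; their negative 'preference' ranks
        # them below the regular progressive and DASH formats when sorting.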
'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10}, '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10}, '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10}, '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10}, '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10}, '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10}, '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10}, '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10}, # DASH mp4 video '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'}, '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'}, '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'}, '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'}, '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'}, '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559) '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'}, '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'}, '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'}, '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60}, '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60}, '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'}, # Dash mp4 audio '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'}, '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'}, '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'}, '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'}, '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'}, '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'}, '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'}, # Dash webm '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'}, '278': {'ext': 'webm', 
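        # Note: the VP9 entries below (242-315) are video-only DASH streams;
        # youtube-dl pairs them with a separate DASH audio format and muxes
        # the two with ffmpeg/avconv when one of those is available.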
'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'}, '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'}, # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug) '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'}, '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60}, # Dash webm audio '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128}, '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256}, # Dash webm audio with opus inside '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50}, '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70}, '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160}, # RTMP (unnamed) '_rtmp': {'protocol': 'rtmp'}, # av01 video only formats sometimes served with "unknown" codecs '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, } _SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt') _GEO_BYPASS = False IE_NAME = 'youtube' _TESTS = [ { 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9', 'info_dict': { 'id': 'BaW_jenozKc', 'ext': 'mp4', 'title': 'youtube-dl test video "\'/\\ä↭𝕐', 'uploader': 'Philipp Hagemeister', 'uploader_id': 'phihag', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag', 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q', 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q', 'upload_date': '20121002', 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .', 'categories': ['Science & Technology'], 'tags': ['youtube-dl'], 'duration': 10, 'view_count': int, 'like_count': int, 'dislike_count': int, 'start_time': 1, 'end_time': 9, } }, { 'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY', 'note': 'Test generic use_cipher_signature video (#897)', 'info_dict': { 'id': 'UxxajLWwzqY', 'ext': 'mp4', 'upload_date': '20120506', 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]', 'alt_title': 'I Love It (feat. 
Charli XCX)', 'description': 'md5:19a2f98d9032b9311e686ed039564f63', 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli', 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop', 'iconic ep', 'iconic', 'love', 'it'], 'duration': 180, 'uploader': 'Icona Pop', 'uploader_id': 'IconaPop', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop', 'creator': 'Icona Pop', 'track': 'I Love It (feat. Charli XCX)', 'artist': 'Icona Pop', } }, { 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ', 'note': 'Test VEVO video with age protection (#956)', 'info_dict': { 'id': '07FYdnEawAQ', 'ext': 'mp4', 'upload_date': '20130703', 'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)', 'alt_title': 'Tunnel Vision', 'description': 'md5:07dab3356cde4199048e4c7cd93471e1', 'duration': 419, 'uploader': 'justintimberlakeVEVO', 'uploader_id': 'justintimberlakeVEVO', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO', 'creator': 'Justin Timberlake', 'track': 'Tunnel Vision', 'artist': 'Justin Timberlake', 'age_limit': 18, } }, { 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ', 'note': 'Embed-only video (#1746)', 'info_dict': { 'id': 'yZIXLfi8CZQ', 'ext': 'mp4', 'upload_date': '20120608', 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012', 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7', 'uploader': 'SET India', 'uploader_id': 'setindia', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia', 'age_limit': 18, } }, { 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY', 'note': 'Use the first video ID in the URL', 'info_dict': { 'id': 'BaW_jenozKc', 'ext': 'mp4', 'title': 'youtube-dl test video "\'/\\ä↭𝕐', 'uploader': 'Philipp Hagemeister', 'uploader_id': 'phihag', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag', 'upload_date': '20121002', 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .', 'categories': ['Science & Technology'], 'tags': ['youtube-dl'], 'duration': 10, 'view_count': int, 'like_count': int, 'dislike_count': int, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I', 'note': '256k DASH audio (format 141) via DASH manifest', 'info_dict': { 'id': 'a9LDPn-MO4I', 'ext': 'm4a', 'upload_date': '20121002', 'uploader_id': '8KVIDEO', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO', 'description': '', 'uploader': '8KVIDEO', 'title': 'UHDTV TEST 8K VIDEO.mp4' }, 'params': { 'youtube_include_dash_manifest': True, 'format': '141', }, 'skip': 'format 141 not served anymore', }, # DASH manifest with encrypted signature { 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA', 'info_dict': { 'id': 'IB3lcPjvWLA', 'ext': 'm4a', 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. 
Spree Wilson', 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf', 'duration': 244, 'uploader': 'AfrojackVEVO', 'uploader_id': 'AfrojackVEVO', 'upload_date': '20131011', }, 'params': { 'youtube_include_dash_manifest': True, 'format': '141/bestaudio[ext=m4a]', }, }, # JS player signature function name containing $ { 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM', 'info_dict': { 'id': 'nfWlot6h_JM', 'ext': 'm4a', 'title': 'Taylor Swift - Shake It Off', 'description': 'md5:307195cd21ff7fa352270fe884570ef0', 'duration': 242, 'uploader': 'TaylorSwiftVEVO', 'uploader_id': 'TaylorSwiftVEVO', 'upload_date': '20140818', }, 'params': { 'youtube_include_dash_manifest': True, 'format': '141/bestaudio[ext=m4a]', }, }, # Controversy video { 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8', 'info_dict': { 'id': 'T4XJQO3qol8', 'ext': 'mp4', 'duration': 219, 'upload_date': '20100909', 'uploader': 'Amazing Atheist', 'uploader_id': 'TheAmazingAtheist', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist', 'title': 'Burning Everyone\'s Koran', 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html', } }, # Normal age-gate video (No vevo, embed allowed) { 'url': 'https://youtube.com/watch?v=HtVdAasjOgU', 'info_dict': { 'id': 'HtVdAasjOgU', 'ext': 'mp4', 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer', 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}', 'duration': 142, 'uploader': 'The Witcher', 'uploader_id': 'WitcherGame', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame', 'upload_date': '20140605', 'age_limit': 18, }, }, # Age-gate video with encrypted signature { 'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU', 'info_dict': { 'id': '6kLq3WMV1nU', 'ext': 'mp4', 'title': 'Dedication To My Ex (Miss That) (Lyric Video)', 'description': 'md5:33765bb339e1b47e7e72b5490139bb41', 'duration': 246, 'uploader': 'LloydVEVO', 'uploader_id': 'LloydVEVO', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO', 'upload_date': '20110629', 'age_limit': 18, }, }, # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421) # YouTube Red ad is not captured for creator { 'url': '__2ABJjxzNo', 'info_dict': { 'id': '__2ABJjxzNo', 'ext': 'mp4', 'duration': 266, 'upload_date': '20100430', 'uploader_id': 'deadmau5', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5', 'creator': 'Dada Life, deadmau5', 'description': 'md5:12c56784b8032162bb936a5f76d55360', 'uploader': 'deadmau5', 'title': 'Deadmau5 - Some Chords (HD)', 'alt_title': 'This Machine Kills Some Chords', }, 'expected_warnings': [ 'DASH manifest missing', ] }, # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431) { 'url': 'lqQg6PlCWgI', 'info_dict': { 'id': 'lqQg6PlCWgI', 'ext': 'mp4', 'duration': 6085, 'upload_date': '20150827', 'uploader_id': 'olympic', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic', 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games', 'uploader': 'Olympic', 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games', }, 'params': { 'skip_download': 'requires avconv', } }, # Non-square pixels { 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0', 'info_dict': { 'id': '_b-2C3KPAM0', 'ext': 'mp4', 'stretched_ratio': 16 / 9., 'duration': 85, 'upload_date': '20110310', 
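            # 'stretched_ratio' above is the expected outcome of the
            # yt:stretch aspect-ratio correction applied in _real_extract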
'uploader_id': 'AllenMeow', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow', 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯', 'uploader': '孫ᄋᄅ', 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人', }, }, # url_encoded_fmt_stream_map is empty string { 'url': 'qEJwOuvDf7I', 'info_dict': { 'id': 'qEJwOuvDf7I', 'ext': 'webm', 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге', 'description': '', 'upload_date': '20150404', 'uploader_id': 'spbelect', 'uploader': 'Наблюдатели Петербурга', }, 'params': { 'skip_download': 'requires avconv', }, 'skip': 'This live event has ended.', }, # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097) { 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y', 'info_dict': { 'id': 'FIl7x6_3R5Y', 'ext': 'webm', 'title': 'md5:7b81415841e02ecd4313668cde88737a', 'description': 'md5:116377fd2963b81ec4ce64b542173306', 'duration': 220, 'upload_date': '20150625', 'uploader_id': 'dorappi2000', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000', 'uploader': 'dorappi2000', 'formats': 'mincount:31', }, 'skip': 'not actual anymore', }, # DASH manifest with segment_list { 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8', 'md5': '8ce563a1d667b599d21064e982ab9e31', 'info_dict': { 'id': 'CsmdDsKjzN8', 'ext': 'mp4', 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510 'uploader': 'Airtek', 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.', 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ', 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015', }, 'params': { 'youtube_include_dash_manifest': True, 'format': '135', # bestvideo }, 'skip': 'This live event has ended.', }, { # Multifeed videos (multiple cameras), URL is for Main Camera 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs', 'info_dict': { 'id': 'jqWvoWXjCVs', 'title': 'teamPGP: Rocket League Noob Stream', 'description': 'md5:dc7872fb300e143831327f1bae3af010', }, 'playlist': [{ 'info_dict': { 'id': 'jqWvoWXjCVs', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7335, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }, { 'info_dict': { 'id': '6h8e8xoXJzg', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7337, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }, { 'info_dict': { 'id': 'PUOgX5z9xZw', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (grizzle)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7337, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }, { 'info_dict': { 'id': 'teuwxikvS5k', 'ext': 'mp4', 'title': 'teamPGP: Rocket League Noob Stream (zim)', 'description': 'md5:dc7872fb300e143831327f1bae3af010', 'duration': 7334, 'upload_date': '20150721', 'uploader': 'Beer Games Beer', 'uploader_id': 
'beergamesbeer', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer', 'license': 'Standard YouTube License', }, }], 'params': { 'skip_download': True, }, 'skip': 'This video is not available.', }, { # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536) 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo', 'info_dict': { 'id': 'gVfLd0zydlo', 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30', }, 'playlist_count': 2, 'skip': 'Not multifeed anymore', }, { 'url': 'https://vid.plus/FlRa-iH7PGw', 'only_matching': True, }, { 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html', 'only_matching': True, }, { # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468) # Also tests cut-off URL expansion in video description (see # https://github.com/ytdl-org/youtube-dl/issues/1892, # https://github.com/ytdl-org/youtube-dl/issues/8164) 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg', 'info_dict': { 'id': 'lsguqyKfVQg', 'ext': 'mp4', 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21', 'alt_title': 'Dark Walk - Position Music', 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a', 'duration': 133, 'upload_date': '20151119', 'uploader_id': 'IronSoulElf', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf', 'uploader': 'IronSoulElf', 'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan', 'track': 'Dark Walk - Position Music', 'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan', 'album': 'Position Music - Production Music Vol. 143 - Dark Walk', }, 'params': { 'skip_download': True, }, }, { # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468) 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8', 'only_matching': True, }, { # Video with yt:stretch=17:0 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM', 'info_dict': { 'id': 'Q39EVAstoRM', 'ext': 'mp4', 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4', 'description': 'md5:ee18a25c350637c8faff806845bddee9', 'upload_date': '20151107', 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA', 'uploader': 'CH GAMER DROID', }, 'params': { 'skip_download': True, }, 'skip': 'This video does not exist.', }, { # Video licensed under Creative Commons 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA', 'info_dict': { 'id': 'M4gD1WSo5mA', 'ext': 'mp4', 'title': 'md5:e41008789470fc2533a3252216f1c1d1', 'description': 'md5:a677553cf0840649b731a3024aeff4cc', 'duration': 721, 'upload_date': '20150127', 'uploader_id': 'BerkmanCenter', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter', 'uploader': 'The Berkman Klein Center for Internet & Society', 'license': 'Creative Commons Attribution license (reuse allowed)', }, 'params': { 'skip_download': True, }, }, { # Channel-like uploader_url 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg', 'info_dict': { 'id': 'eQcmzGIKrzg', 'ext': 'mp4', 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders', 'description': 'md5:dda0d780d5a6e120758d1711d062a867', 'duration': 4060, 'upload_date': '20151119', 'uploader': 'Bernie Sanders', 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg', 'license': 'Creative Commons Attribution license (reuse allowed)', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY', 'only_matching': True, }, { # 
YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059) 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo', 'only_matching': True, }, { # Rental video preview 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg', 'info_dict': { 'id': 'uGpuVWrhIzE', 'ext': 'mp4', 'title': 'Piku - Trailer', 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb', 'upload_date': '20150811', 'uploader': 'FlixMatrix', 'uploader_id': 'FlixMatrixKaravan', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan', 'license': 'Standard YouTube License', }, 'params': { 'skip_download': True, }, 'skip': 'This video is not available.', }, { # YouTube Red video with episode data 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4', 'info_dict': { 'id': 'iqKdEhx-dD4', 'ext': 'mp4', 'title': 'Isolation - Mind Field (Ep 1)', 'description': 'md5:46a29be4ceffa65b92d277b93f463c0f', 'duration': 2085, 'upload_date': '20170118', 'uploader': 'Vsauce', 'uploader_id': 'Vsauce', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce', 'series': 'Mind Field', 'season_number': 1, 'episode_number': 1, }, 'params': { 'skip_download': True, }, 'expected_warnings': [ 'Skipping DASH manifest', ], }, { # The following content has been identified by the YouTube community # as inappropriate or offensive to some audiences. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI', 'info_dict': { 'id': '6SJNVb0GnPI', 'ext': 'mp4', 'title': 'Race Differences in Intelligence', 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1', 'duration': 965, 'upload_date': '20140124', 'uploader': 'New Century Foundation', 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg', }, 'params': { 'skip_download': True, }, }, { # itag 212 'url': '1t24XAntNCY', 'only_matching': True, }, { # geo restricted to JP 'url': 'sJL6WA-aGkQ', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM', 'only_matching': True, }, { 'url': 'https://invidio.us/watch?v=BaW_jenozKc', 'only_matching': True, }, { # DRM protected 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc', 'only_matching': True, }, { # Video with unsupported adaptive stream type formats 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U', 'info_dict': { 'id': 'Z4Vy8R84T1U', 'ext': 'mp4', 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'duration': 433, 'upload_date': '20130923', 'uploader': 'Amelia Putri Harwita', 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q', 'formats': 'maxcount:10', }, 'params': { 'skip_download': True, 'youtube_include_dash_manifest': False, }, 'skip': 'not actual anymore', }, { # Youtube Music Auto-generated description 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs', 'info_dict': { 'id': 'MgNrAu2pzNs', 'ext': 'mp4', 'title': 'Voyeur Girl', 'description': 'md5:7ae382a65843d6df2685993e90a8628f', 'upload_date': '20190312', 'uploader': 'Stephen - Topic', 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA', 'artist': 'Stephen', 'track': 'Voyeur Girl', 'album': 'it\'s too much love to know my dear', 'release_date': '20190313', 'release_year': 2019, }, 'params': { 'skip_download': True, }, }, { # Youtube Music Auto-generated description # Retrieve 'artist' field from 'Artist:' in video description # when it is present on youtube music video 'url': 
'https://www.youtube.com/watch?v=k0jLE7tTwjY', 'info_dict': { 'id': 'k0jLE7tTwjY', 'ext': 'mp4', 'title': 'Latch Feat. Sam Smith', 'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335', 'upload_date': '20150110', 'uploader': 'Various Artists - Topic', 'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w', 'artist': 'Disclosure', 'track': 'Latch Feat. Sam Smith', 'album': 'Latch Featuring Sam Smith', 'release_date': '20121008', 'release_year': 2012, }, 'params': { 'skip_download': True, }, }, { # Youtube Music Auto-generated description # handle multiple artists on youtube music video 'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA', 'info_dict': { 'id': '74qn0eJSjpA', 'ext': 'mp4', 'title': 'Eastside', 'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2', 'upload_date': '20180710', 'uploader': 'Benny Blanco - Topic', 'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A', 'artist': 'benny blanco, Halsey, Khalid', 'track': 'Eastside', 'album': 'Eastside', 'release_date': '20180713', 'release_year': 2018, }, 'params': { 'skip_download': True, }, }, { # Youtube Music Auto-generated description # handle youtube music video with release_year and no release_date 'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M', 'info_dict': { 'id': '-hcAI0g-f5M', 'ext': 'mp4', 'title': 'Put It On Me', 'description': 'md5:f6422397c07c4c907c6638e1fee380a5', 'upload_date': '20180426', 'uploader': 'Matt Maeson - Topic', 'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ', 'artist': 'Matt Maeson', 'track': 'Put It On Me', 'album': 'The Hearse', 'release_date': None, 'release_year': 2018, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q', 'only_matching': True, }, { # invalid -> valid video id redirection 'url': 'DJztXj2GPfl', 'info_dict': { 'id': 'DJztXj2GPfk', 'ext': 'mp4', 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)', 'description': 'md5:bf577a41da97918e94fa9798d9228825', 'upload_date': '20090125', 'uploader': 'Prochorowka', 'uploader_id': 'Prochorowka', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka', 'artist': 'Panjabi MC', 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix', 'album': 'Beware of the Boys (Mundian To Bach Ke)', }, 'params': { 'skip_download': True, }, } ] def __init__(self, *args, **kwargs): super(YoutubeIE, self).__init__(*args, **kwargs) self._player_cache = {} def report_video_info_webpage_download(self, video_id): """Report attempt to download video info webpage.""" self.to_screen('%s: Downloading video info webpage' % video_id) def report_information_extraction(self, video_id): """Report attempt to extract video information.""" self.to_screen('%s: Extracting video information' % video_id) def report_unavailable_format(self, video_id, format): """Report extracted video URL.""" self.to_screen('%s: Format %s not available' % (video_id, format)) def report_rtmp_download(self): """Indicate the download will use the RTMP protocol.""" self.to_screen('RTMP download detected') def _signature_cache_id(self, example_sig): """ Return a string representation of a signature """ return '.'.join(compat_str(len(part)) for part in example_sig.split('.')) @classmethod def _extract_player_info(cls, player_url): for player_re in cls._PLAYER_INFO_RE: id_m = re.search(player_re, player_url) if id_m: break else: raise ExtractorError('Cannot identify player %r' % player_url) return id_m.group('ext'), id_m.group('id') def _extract_signature_function(self, video_id, player_url, example_sig): player_type, player_id = 
self._extract_player_info(player_url) # Read from filesystem cache func_id = '%s_%s_%s' % ( player_type, player_id, self._signature_cache_id(example_sig)) assert os.path.basename(func_id) == func_id cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id) if cache_spec is not None: return lambda s: ''.join(s[i] for i in cache_spec) download_note = ( 'Downloading player %s' % player_url if self._downloader.params.get('verbose') else 'Downloading %s player %s' % (player_type, player_id) ) if player_type == 'js': code = self._download_webpage( player_url, video_id, note=download_note, errnote='Download of %s failed' % player_url) res = self._parse_sig_js(code) elif player_type == 'swf': urlh = self._request_webpage( player_url, video_id, note=download_note, errnote='Download of %s failed' % player_url) code = urlh.read() res = self._parse_sig_swf(code) else: assert False, 'Invalid player type %r' % player_type test_string = ''.join(map(compat_chr, range(len(example_sig)))) cache_res = res(test_string) cache_spec = [ord(c) for c in cache_res] self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec) return res def _print_sig_code(self, func, example_sig): def gen_sig_code(idxs): def _genslice(start, end, step): starts = '' if start == 0 else str(start) ends = (':%d' % (end + step)) if end + step >= 0 else ':' steps = '' if step == 1 else (':%d' % step) return 's[%s%s%s]' % (starts, ends, steps) step = None # Quelch pyflakes warnings - start will be set when step is set start = '(Never used)' for i, prev in zip(idxs[1:], idxs[:-1]): if step is not None: if i - prev == step: continue yield _genslice(start, prev, step) step = None continue if i - prev in [-1, 1]: step = i - prev start = prev continue else: yield 's[%d]' % prev if step is None: yield 's[%d]' % i else: yield _genslice(start, i, step) test_string = ''.join(map(compat_chr, range(len(example_sig)))) cache_res = func(test_string) cache_spec = [ord(c) for c in cache_res] expr_code = ' + '.join(gen_sig_code(cache_spec)) signature_id_tuple = '(%s)' % ( ', '.join(compat_str(len(p)) for p in example_sig.split('.'))) code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n' ' return %s\n') % (signature_id_tuple, expr_code) self.to_screen('Extracted signature function:\n' + code) def _parse_sig_js(self, jscode): funcname = self._search_regex( (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # Obsolete patterns r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(', r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('), jscode, 'Initial JS player signature function name', group='sig') jsi = JSInterpreter(jscode) initial_function = jsi.extract_function(funcname) return lambda s: 
initial_function([s]) def _parse_sig_swf(self, file_contents): swfi = SWFInterpreter(file_contents) TARGET_CLASSNAME = 'SignatureDecipher' searched_class = swfi.extract_class(TARGET_CLASSNAME) initial_function = swfi.extract_function(searched_class, 'decipher') return lambda s: initial_function([s]) def _decrypt_signature(self, s, video_id, player_url, age_gate=False): """Turn the encrypted s field into a working signature""" if player_url is None: raise ExtractorError('Cannot decrypt signature without player_url') if player_url.startswith('//'): player_url = 'https:' + player_url elif not re.match(r'https?://', player_url): player_url = compat_urlparse.urljoin( 'https://www.youtube.com', player_url) try: player_id = (player_url, self._signature_cache_id(s)) if player_id not in self._player_cache: func = self._extract_signature_function( video_id, player_url, s ) self._player_cache[player_id] = func func = self._player_cache[player_id] if self._downloader.params.get('youtube_print_sig_code'): self._print_sig_code(func, s) return func(s) except Exception as e: tb = traceback.format_exc() raise ExtractorError( 'Signature extraction failed: ' + tb, cause=e) def _get_subtitles(self, video_id, webpage): try: subs_doc = self._download_xml( 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id, video_id, note=False) except ExtractorError as err: self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err)) return {} sub_lang_list = {} for track in subs_doc.findall('track'): lang = track.attrib['lang_code'] if lang in sub_lang_list: continue sub_formats = [] for ext in self._SUBTITLE_FORMATS: params = compat_urllib_parse_urlencode({ 'lang': lang, 'v': video_id, 'fmt': ext, 'name': track.attrib['name'].encode('utf-8'), }) sub_formats.append({ 'url': 'https://www.youtube.com/api/timedtext?' + params, 'ext': ext, }) sub_lang_list[lang] = sub_formats if not sub_lang_list: self._downloader.report_warning('video doesn\'t have subtitles') return {} return sub_lang_list def _get_ytplayer_config(self, video_id, webpage): patterns = ( # User data may contain arbitrary character sequences that may affect # JSON extraction with regex, e.g. when '};' is contained the second # regex won't capture the whole JSON. 
Yet working around by trying more # concrete regex first keeping in mind proper quoted string handling # to be implemented in future that will replace this workaround (see # https://github.com/ytdl-org/youtube-dl/issues/7468, # https://github.com/ytdl-org/youtube-dl/pull/7599) r';ytplayer\.config\s*=\s*({.+?});ytplayer', r';ytplayer\.config\s*=\s*({.+?});', ) config = self._search_regex( patterns, webpage, 'ytplayer.config', default=None) if config: return self._parse_json( uppercase_escape(config), video_id, fatal=False) def _get_automatic_captions(self, video_id, webpage): """We need the webpage for getting the captions url, pass it as an argument to speed up the process.""" self.to_screen('%s: Looking for automatic captions' % video_id) player_config = self._get_ytplayer_config(video_id, webpage) err_msg = 'Couldn\'t find automatic captions for %s' % video_id if not player_config: self._downloader.report_warning(err_msg) return {} try: args = player_config['args'] caption_url = args.get('ttsurl') if caption_url: timestamp = args['timestamp'] # We get the available subtitles list_params = compat_urllib_parse_urlencode({ 'type': 'list', 'tlangs': 1, 'asrs': 1, }) list_url = caption_url + '&' + list_params caption_list = self._download_xml(list_url, video_id) original_lang_node = caption_list.find('track') if original_lang_node is None: self._downloader.report_warning('Video doesn\'t have automatic captions') return {} original_lang = original_lang_node.attrib['lang_code'] caption_kind = original_lang_node.attrib.get('kind', '') sub_lang_list = {} for lang_node in caption_list.findall('target'): sub_lang = lang_node.attrib['lang_code'] sub_formats = [] for ext in self._SUBTITLE_FORMATS: params = compat_urllib_parse_urlencode({ 'lang': original_lang, 'tlang': sub_lang, 'fmt': ext, 'ts': timestamp, 'kind': caption_kind, }) sub_formats.append({ 'url': caption_url + '&' + params, 'ext': ext, }) sub_lang_list[sub_lang] = sub_formats return sub_lang_list def make_captions(sub_url, sub_langs): parsed_sub_url = compat_urllib_parse_urlparse(sub_url) caption_qs = compat_parse_qs(parsed_sub_url.query) captions = {} for sub_lang in sub_langs: sub_formats = [] for ext in self._SUBTITLE_FORMATS: caption_qs.update({ 'tlang': [sub_lang], 'fmt': [ext], }) sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace( query=compat_urllib_parse_urlencode(caption_qs, True))) sub_formats.append({ 'url': sub_url, 'ext': ext, }) captions[sub_lang] = sub_formats return captions # New captions format as of 22.06.2017 player_response = args.get('player_response') if player_response and isinstance(player_response, compat_str): player_response = self._parse_json( player_response, video_id, fatal=False) if player_response: renderer = player_response['captions']['playerCaptionsTracklistRenderer'] base_url = renderer['captionTracks'][0]['baseUrl'] sub_lang_list = [] for lang in renderer['translationLanguages']: lang_code = lang.get('languageCode') if lang_code: sub_lang_list.append(lang_code) return make_captions(base_url, sub_lang_list) # Some videos don't provide ttsurl but rather caption_tracks and # caption_translation_languages (e.g. 
            # 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
            caption_tracks = args['caption_tracks']
            caption_translation_languages = args['caption_translation_languages']
            caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
            sub_lang_list = []
            for lang in caption_translation_languages.split(','):
                lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
                sub_lang = lang_qs.get('lc', [None])[0]
                if sub_lang:
                    sub_lang_list.append(sub_lang)
            return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, IndexError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}

    def _mark_watched(self, video_id, video_info, player_response):
        playback_url = url_or_none(try_get(
            player_response,
            lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
            video_info, lambda x: x['videostats_playback_base_url'][0]))
        if not playback_url:
            return
        parsed_playback_url = compat_urlparse.urlparse(playback_url)
        qs = compat_urlparse.parse_qs(parsed_playback_url.query)

        # cpn generation algorithm is reverse engineered from base.js.
        # In fact it works even with dummy cpn.
        CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
        cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))

        qs.update({
            'ver': ['2'],
            'cpn': [cpn],
        })
        playback_url = compat_urlparse.urlunparse(
            parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))

        self._download_webpage(
            playback_url, video_id, 'Marking watched',
            'Unable to mark watched', fatal=False)

    @staticmethod
    def _extract_urls(webpage):
        # Embedded YouTube player
        entries = [
            unescapeHTML(mobj.group('url'))
            for mobj in re.finditer(r'''(?x)
            (?:
                <iframe[^>]+?src=|
                data-video-url=|
                <embed[^>]+?src=|
                embedSWF\(?:\s*|
                <object[^>]+data=|
                new\s+SWFObject\(
            )
            (["\'])
            (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
            (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
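            # \1 below backreferences the opening quote captured by (["\'])
            # above, so the URL must be closed by the same quote style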
\1''', webpage)] # lazyYT YouTube embed entries.extend(list(map( unescapeHTML, re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)))) # Wordpress "YouTube Video Importer" plugin matches = re.findall(r'''(?x)<div[^>]+ class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+ data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage) entries.extend(m[-1] for m in matches) return entries @staticmethod def _extract_url(webpage): urls = YoutubeIE._extract_urls(webpage) return urls[0] if urls else None @classmethod def extract_id(cls, url): mobj = re.match(cls._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) video_id = mobj.group(2) return video_id def _extract_chapters_from_json(self, webpage, video_id, duration): if not webpage: return player = self._parse_json( self._search_regex( r'RELATED_PLAYER_ARGS["\']\s*:\s*({.+})\s*,?\s*\n', webpage, 'player args', default='{}'), video_id, fatal=False) if not player or not isinstance(player, dict): return watch_next_response = player.get('watch_next_response') if not isinstance(watch_next_response, compat_str): return response = self._parse_json(watch_next_response, video_id, fatal=False) if not response or not isinstance(response, dict): return chapters_list = try_get( response, lambda x: x['playerOverlays'] ['playerOverlayRenderer'] ['decoratedPlayerBarRenderer'] ['decoratedPlayerBarRenderer'] ['playerBar'] ['chapteredPlayerBarRenderer'] ['chapters'], list) if not chapters_list: return def chapter_time(chapter): return float_or_none( try_get( chapter, lambda x: x['chapterRenderer']['timeRangeStartMillis'], int), scale=1000) chapters = [] for next_num, chapter in enumerate(chapters_list, start=1): start_time = chapter_time(chapter) if start_time is None: continue end_time = (chapter_time(chapters_list[next_num]) if next_num < len(chapters_list) else duration) if end_time is None: continue title = try_get( chapter, lambda x: x['chapterRenderer']['title']['simpleText'], compat_str) chapters.append({ 'start_time': start_time, 'end_time': end_time, 'title': title, }) return chapters @staticmethod def _extract_chapters_from_description(description, duration): if not description: return None chapter_lines = re.findall( r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)', description) if not chapter_lines: return None chapters = [] for next_num, (chapter_line, time_point) in enumerate( chapter_lines, start=1): start_time = parse_duration(time_point) if start_time is None: continue if start_time > duration: break end_time = (duration if next_num == len(chapter_lines) else parse_duration(chapter_lines[next_num][1])) if end_time is None: continue if end_time > duration: end_time = duration if start_time > end_time: break chapter_title = re.sub( r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-') chapter_title = re.sub(r'\s+', ' ', chapter_title) chapters.append({ 'start_time': start_time, 'end_time': end_time, 'title': chapter_title, }) return chapters def _extract_chapters(self, webpage, description, video_id, duration): return (self._extract_chapters_from_json(webpage, video_id, duration) or self._extract_chapters_from_description(description, duration)) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) proto = ( 'http' if self._downloader.params.get('prefer_insecure', False) else 'https') start_time = None end_time = None parsed_url = compat_urllib_parse_urlparse(url) for component 
in [parsed_url.fragment, parsed_url.query]: query = compat_parse_qs(component) if start_time is None and 't' in query: start_time = parse_duration(query['t'][0]) if start_time is None and 'start' in query: start_time = parse_duration(query['start'][0]) if end_time is None and 'end' in query: end_time = parse_duration(query['end'][0]) # Extract original video URL from URL with redirection, like age verification, using next_url parameter mobj = re.search(self._NEXT_URL_RE, url) if mobj: url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/') video_id = self.extract_id(url) # Get video webpage url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id video_webpage, urlh = self._download_webpage_handle(url, video_id) qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query) video_id = qs.get('v', [None])[0] or video_id # Attempt to extract SWF player URL mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) if mobj is not None: player_url = re.sub(r'\\(.)', r'\1', mobj.group(1)) else: player_url = None dash_mpds = [] def add_dash_mpd(video_info): dash_mpd = video_info.get('dashmpd') if dash_mpd and dash_mpd[0] not in dash_mpds: dash_mpds.append(dash_mpd[0]) def add_dash_mpd_pr(pl_response): dash_mpd = url_or_none(try_get( pl_response, lambda x: x['streamingData']['dashManifestUrl'], compat_str)) if dash_mpd and dash_mpd not in dash_mpds: dash_mpds.append(dash_mpd) is_live = None view_count = None def extract_view_count(v_info): return int_or_none(try_get(v_info, lambda x: x['view_count'][0])) def extract_player_response(player_response, video_id): pl_response = str_or_none(player_response) if not pl_response: return pl_response = self._parse_json(pl_response, video_id, fatal=False) if isinstance(pl_response, dict): add_dash_mpd_pr(pl_response) return pl_response player_response = {} # Get video info video_info = {} embed_webpage = None if re.search(r'player-age-gate-content">', video_webpage) is not None: age_gate = True # We simulate the access to the video from www.youtube.com/v/{video_id} # this can be viewed without login into Youtube url = proto + '://www.youtube.com/embed/%s' % video_id embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage') data = compat_urllib_parse_urlencode({ 'video_id': video_id, 'eurl': 'https://youtube.googleapis.com/v/' + video_id, 'sts': self._search_regex( r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''), }) video_info_url = proto + '://www.youtube.com/get_video_info?' + data try: video_info_webpage = self._download_webpage( video_info_url, video_id, note='Refetching age-gated info webpage', errnote='unable to download video info webpage') except ExtractorError: video_info_webpage = None if video_info_webpage: video_info = compat_parse_qs(video_info_webpage) pl_response = video_info.get('player_response', [None])[0] player_response = extract_player_response(pl_response, video_id) add_dash_mpd(video_info) view_count = extract_view_count(video_info) else: age_gate = False # Try looking directly into the video webpage ytplayer_config = self._get_ytplayer_config(video_id, video_webpage) if ytplayer_config: args = ytplayer_config['args'] if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'): # Convert to the same format returned by compat_parse_qs video_info = dict((k, [v]) for k, v in args.items()) add_dash_mpd(video_info) # Rental video is not rented but preview is available (e.g. 
# https://www.youtube.com/watch?v=yYr8q0y5Jfg, # https://github.com/ytdl-org/youtube-dl/issues/10532) if not video_info and args.get('ypc_vid'): return self.url_result( args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid']) if args.get('livestream') == '1' or args.get('live_playback') == 1: is_live = True if not player_response: player_response = extract_player_response(args.get('player_response'), video_id) if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True): add_dash_mpd_pr(player_response) def extract_unavailable_message(): messages = [] for tag, kind in (('h1', 'message'), ('div', 'submessage')): msg = self._html_search_regex( r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind), video_webpage, 'unavailable %s' % kind, default=None) if msg: messages.append(msg) if messages: return '\n'.join(messages) if not video_info and not player_response: unavailable_message = extract_unavailable_message() if not unavailable_message: unavailable_message = 'Unable to extract video data' raise ExtractorError( 'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id) if not isinstance(video_info, dict): video_info = {} video_details = try_get( player_response, lambda x: x['videoDetails'], dict) or {} video_title = video_info.get('title', [None])[0] or video_details.get('title') if not video_title: self._downloader.report_warning('Unable to extract video title') video_title = '_' description_original = video_description = get_element_by_id("eow-description", video_webpage) if video_description: def replace_url(m): redir_url = compat_urlparse.urljoin(url, m.group(1)) parsed_redir_url = compat_urllib_parse_urlparse(redir_url) if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect': qs = compat_parse_qs(parsed_redir_url.query) q = qs.get('q') if q and q[0]: return q[0] return redir_url description_original = video_description = re.sub(r'''(?x) <a\s+ (?:[a-zA-Z-]+="[^"]*"\s+)*? (?:title|href)="([^"]+)"\s+ (?:[a-zA-Z-]+="[^"]*"\s+)*? 
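                # matches anchor tags whose visible text was truncated with
                # "..." so that replace_url() above can restore the full
                # target, unwrapping youtube.com/redirect links on the way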
class="[^"]*"[^>]*> [^<]+\.{3}\s* </a> ''', replace_url, video_description) video_description = clean_html(video_description) else: video_description = self._html_search_meta('description', video_webpage) or video_details.get('shortDescription') if not smuggled_data.get('force_singlefeed', False): if not self._downloader.params.get('noplaylist'): multifeed_metadata_list = try_get( player_response, lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'], compat_str) or try_get( video_info, lambda x: x['multifeed_metadata_list'][0], compat_str) if multifeed_metadata_list: entries = [] feed_ids = [] for feed in multifeed_metadata_list.split(','): # Unquote should take place before split on comma (,) since textual # fields may contain comma as well (see # https://github.com/ytdl-org/youtube-dl/issues/8536) feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed)) def feed_entry(name): return try_get(feed_data, lambda x: x[name][0], compat_str) feed_id = feed_entry('id') if not feed_id: continue feed_title = feed_entry('title') title = video_title if feed_title: title += ' (%s)' % feed_title entries.append({ '_type': 'url_transparent', 'ie_key': 'Youtube', 'url': smuggle_url( '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]), {'force_singlefeed': True}), 'title': title, }) feed_ids.append(feed_id) self.to_screen( 'Downloading multifeed video (%s) - add --no-playlist to just download video %s' % (', '.join(feed_ids), video_id)) return self.playlist_result(entries, video_id, video_title, video_description) else: self.to_screen('Downloading just video %s because of --no-playlist' % video_id) if view_count is None: view_count = extract_view_count(video_info) if view_count is None and video_details: view_count = int_or_none(video_details.get('viewCount')) if is_live is None: is_live = bool_or_none(video_details.get('isLive')) # Check for "rental" videos if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: raise ExtractorError('"rental" videos not supported. 
See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True) def _extract_filesize(media_url): return int_or_none(self._search_regex( r'\bclen[=/](\d+)', media_url, 'filesize', default=None)) streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or [] streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or []) if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'): self.report_rtmp_download() formats = [{ 'format_id': '_rtmp', 'protocol': 'rtmp', 'url': video_info['conn'][0], 'player_url': player_url, }] elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1): encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0] if 'rtmpe%3Dyes' in encoded_url_map: raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True) formats = [] formats_spec = {} fmt_list = video_info.get('fmt_list', [''])[0] if fmt_list: for fmt in fmt_list.split(','): spec = fmt.split('/') if len(spec) > 1: width_height = spec[1].split('x') if len(width_height) == 2: formats_spec[spec[0]] = { 'resolution': spec[1], 'width': int_or_none(width_height[0]), 'height': int_or_none(width_height[1]), } for fmt in streaming_formats: itag = str_or_none(fmt.get('itag')) if not itag: continue quality = fmt.get('quality') quality_label = fmt.get('qualityLabel') or quality formats_spec[itag] = { 'asr': int_or_none(fmt.get('audioSampleRate')), 'filesize': int_or_none(fmt.get('contentLength')), 'format_note': quality_label, 'fps': int_or_none(fmt.get('fps')), 'height': int_or_none(fmt.get('height')), # bitrate for itag 43 is always 2147483647 'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None, 'width': int_or_none(fmt.get('width')), } for fmt in streaming_formats: if fmt.get('drmFamilies') or fmt.get('drm_families'): continue url = url_or_none(fmt.get('url')) if not url: cipher = fmt.get('cipher') or fmt.get('signatureCipher') if not cipher: continue url_data = compat_parse_qs(cipher) url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str)) if not url: continue else: cipher = None url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query) stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0])) # Unsupported FORMAT_STREAM_TYPE_OTF if stream_type == 3: continue format_id = fmt.get('itag') or url_data['itag'][0] if not format_id: continue format_id = compat_str(format_id) if cipher: if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True): ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")' jsplayer_url_json = self._search_regex( ASSETS_RE, embed_webpage if age_gate else video_webpage, 'JS player URL (1)', default=None) if not jsplayer_url_json and not age_gate: # We need the embed website after all if embed_webpage is None: embed_url = proto + '://www.youtube.com/embed/%s' % video_id embed_webpage = self._download_webpage( embed_url, video_id, 'Downloading embed webpage') jsplayer_url_json = self._search_regex( ASSETS_RE, embed_webpage, 'JS player URL') player_url = json.loads(jsplayer_url_json) if player_url is None: player_url_json = self._search_regex( r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")', video_webpage, 'age gate player URL') 
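                            # The regex above captures the player URL together with its
                            # surrounding quotes and JSON escapes (a hypothetical match:
                            # '"\/yts\/jsbin\/player-en_US\/base.js"'), hence the
                            # json.loads call below instead of using the match directly.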
player_url = json.loads(player_url_json) if 'sig' in url_data: url += '&signature=' + url_data['sig'][0] elif 's' in url_data: encrypted_sig = url_data['s'][0] if self._downloader.params.get('verbose'): if player_url is None: player_desc = 'unknown' else: player_type, player_version = self._extract_player_info(player_url) player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version) parts_sizes = self._signature_cache_id(encrypted_sig) self.to_screen('{%s} signature length %s, %s' % (format_id, parts_sizes, player_desc)) signature = self._decrypt_signature( encrypted_sig, video_id, player_url, age_gate) sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature' url += '&%s=%s' % (sp, signature) if 'ratebypass' not in url: url += '&ratebypass=yes' dct = { 'format_id': format_id, 'url': url, 'player_url': player_url, } if format_id in self._formats: dct.update(self._formats[format_id]) if format_id in formats_spec: dct.update(formats_spec[format_id]) # Some itags are not included in DASH manifest thus corresponding formats will # lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993). # Trying to extract metadata from url_encoded_fmt_stream_map entry. mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0]) width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None) if width is None: width = int_or_none(fmt.get('width')) if height is None: height = int_or_none(fmt.get('height')) filesize = int_or_none(url_data.get( 'clen', [None])[0]) or _extract_filesize(url) quality = url_data.get('quality', [None])[0] or fmt.get('quality') quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel') tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000) or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps')) more_fields = { 'filesize': filesize, 'tbr': tbr, 'width': width, 'height': height, 'fps': fps, 'format_note': quality_label or quality, } for key, value in more_fields.items(): if value: dct[key] = value type_ = url_data.get('type', [None])[0] or fmt.get('mimeType') if type_: type_split = type_.split(';') kind_ext = type_split[0].split('/') if len(kind_ext) == 2: kind, _ = kind_ext dct['ext'] = mimetype2ext(type_split[0]) if kind in ('audio', 'video'): codecs = None for mobj in re.finditer( r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_): if mobj.group('key') == 'codecs': codecs = mobj.group('val') break if codecs: dct.update(parse_codecs(codecs)) if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none': dct['downloader_options'] = { # Youtube throttles chunks >~10M 'http_chunk_size': 10485760, } formats.append(dct) else: manifest_url = ( url_or_none(try_get( player_response, lambda x: x['streamingData']['hlsManifestUrl'], compat_str)) or url_or_none(try_get( video_info, lambda x: x['hlsvp'][0], compat_str))) if manifest_url: formats = [] m3u8_formats = self._extract_m3u8_formats( manifest_url, video_id, 'mp4', fatal=False) for a_format in m3u8_formats: itag = self._search_regex( r'/itag/(\d+)/', a_format['url'], 'itag', default=None) if itag: a_format['format_id'] = itag if itag in self._formats: dct = self._formats[itag].copy() dct.update(a_format) a_format = dct a_format['player_url'] = player_url # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming 
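                    # (youtube-dl's HTTP handler treats this pseudo-header as a
                    # flag and strips Accept-Encoding from the outgoing request)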
                    a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
                    formats.append(a_format)
            else:
                error_message = extract_unavailable_message()
                if not error_message:
                    error_message = clean_html(try_get(
                        player_response, lambda x: x['playabilityStatus']['reason'],
                        compat_str))
                if not error_message:
                    error_message = clean_html(
                        try_get(video_info, lambda x: x['reason'][0], compat_str))
                if error_message:
                    raise ExtractorError(error_message, expected=True)
                raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')

        # uploader
        video_uploader = try_get(
            video_info, lambda x: x['author'][0],
            compat_str) or str_or_none(video_details.get('author'))
        if video_uploader:
            video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
        else:
            self._downloader.report_warning('unable to extract uploader name')

        # uploader_id
        video_uploader_id = None
        video_uploader_url = None
        mobj = re.search(
            r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
            video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group('uploader_id')
            video_uploader_url = mobj.group('uploader_url')
        else:
            self._downloader.report_warning('unable to extract uploader nickname')

        channel_id = (
            str_or_none(video_details.get('channelId'))
            or self._html_search_meta(
                'channelId', video_webpage, 'channel id', default=None)
            or self._search_regex(
                r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                video_webpage, 'channel id', default=None, group='id'))
        channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None

        # thumbnail image
        # We try first to get a high quality image:
        m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
                            video_webpage, re.DOTALL)
        if m_thumb is not None:
            video_thumbnail = m_thumb.group(1)
        elif 'thumbnail_url' not in video_info:
            self._downloader.report_warning('unable to extract video thumbnail')
            video_thumbnail = None
        else:   # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])

        # upload date
        upload_date = self._html_search_meta(
            'datePublished', video_webpage, 'upload date', default=None)
        if not upload_date:
            upload_date = self._search_regex(
                [r'(?s)id="eow-date.*?>(.*?)</span>',
                 r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
                video_webpage, 'upload date', default=None)
        upload_date = unified_strdate(upload_date)

        video_license = self._html_search_regex(
            r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
            video_webpage, 'license', default=None)

        m_music = re.search(
            r'''(?x)
                <h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
                <ul[^>]*>\s*
                <li>(?P<title>.+?)
                by (?P<creator>.+?)
                (?:
                    \(.+?\)|
                    <a[^>]*
                        (?:
                            \bhref=["\']/red[^>]*>|             # drop possible
                            >\s*Listen ad-free with YouTube Red # YouTube Red ad
                        )
                    .*?
                )?</li
            ''',
            video_webpage)
        if m_music:
            video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
            video_creator = clean_html(m_music.group('creator'))
        else:
            video_alt_title = video_creator = None

        def extract_meta(field):
            return self._html_search_regex(
                r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
                video_webpage, field, default=None)

        track = extract_meta('Song')
        artist = extract_meta('Artist')
        album = extract_meta('Album')

        # Youtube Music Auto-generated description
        release_date = release_year = None
        if video_description:
            mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
            if mobj:
                if not track:
                    track = mobj.group('track').strip()
                if not artist:
                    artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
                if not album:
                    album = mobj.group('album').strip()
                release_year = mobj.group('release_year')
                release_date = mobj.group('release_date')
                if release_date:
                    release_date = release_date.replace('-', '')
                    if not release_year:
                        release_year = int(release_date[:4])
                if release_year:
                    release_year = int(release_year)

        m_episode = re.search(
            r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
            video_webpage)
        if m_episode:
            series = unescapeHTML(m_episode.group('series'))
            season_number = int(m_episode.group('season'))
            episode_number = int(m_episode.group('episode'))
        else:
            series = season_number = episode_number = None

        m_cat_container = self._search_regex(
            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
            video_webpage, 'categories', default=None)
        if m_cat_container:
            category = self._html_search_regex(
                r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
                default=None)
            video_categories = None if category is None else [category]
        else:
            video_categories = None

        video_tags = [
            unescapeHTML(m.group('content'))
            for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]

        def _extract_count(count_name):
            return str_to_int(self._search_regex(
                r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
                % re.escape(count_name),
                video_webpage, count_name, default=None))

        like_count = _extract_count('like')
        dislike_count = _extract_count('dislike')

        if view_count is None:
            view_count = str_to_int(self._search_regex(
                r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
                'view count', default=None))

        average_rating = (
            float_or_none(video_details.get('averageRating'))
            or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, video_webpage)
        automatic_captions = self.extract_automatic_captions(video_id, video_webpage)

        video_duration = try_get(
            video_info, lambda x: int_or_none(x['length_seconds'][0]))
        if not video_duration:
            video_duration = int_or_none(video_details.get('lengthSeconds'))
        if not video_duration:
            video_duration = parse_duration(self._html_search_meta(
                'duration', video_webpage, 'video duration'))

        # annotations
        video_annotations = None
        if self._downloader.params.get('writeannotations', False):
            xsrf_token = self._search_regex(
                r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
                video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
            invideo_url = try_get(
                player_response,
lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str) if xsrf_token and invideo_url: xsrf_field_name = self._search_regex( r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2', video_webpage, 'xsrf field name', group='xsrf_field_name', default='session_token') video_annotations = self._download_webpage( self._proto_relative_url(invideo_url), video_id, note='Downloading annotations', errnote='Unable to download video annotations', fatal=False, data=urlencode_postdata({xsrf_field_name: xsrf_token})) chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration) # Look for the DASH manifest if self._downloader.params.get('youtube_include_dash_manifest', True): dash_mpd_fatal = True for mpd_url in dash_mpds: dash_formats = {} try: def decrypt_sig(mobj): s = mobj.group(1) dec_s = self._decrypt_signature(s, video_id, player_url, age_gate) return '/signature/%s' % dec_s mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url) for df in self._extract_mpd_formats( mpd_url, video_id, fatal=dash_mpd_fatal, formats_dict=self._formats): if not df.get('filesize'): df['filesize'] = _extract_filesize(df['url']) # Do not overwrite DASH format found in some previous DASH manifest if df['format_id'] not in dash_formats: dash_formats[df['format_id']] = df # Additional DASH manifests may end up in HTTP Error 403 therefore # allow them to fail without bug report message if we already have # some DASH manifest succeeded. This is temporary workaround to reduce # burst of bug reports until we figure out the reason and whether it # can be fixed at all. dash_mpd_fatal = False except (ExtractorError, KeyError) as e: self.report_warning( 'Skipping DASH manifest: %r' % e, video_id) if dash_formats: # Remove the formats we found through non-DASH, they # contain less info and it can be wrong, because we use # fixed values (for example the resolution). See # https://github.com/ytdl-org/youtube-dl/issues/5774 for an # example. formats = [f for f in formats if f['format_id'] not in dash_formats.keys()] formats.extend(dash_formats.values()) # Check for malformed aspect ratio stretched_m = re.search( r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">', video_webpage) if stretched_m: w = float(stretched_m.group('w')) h = float(stretched_m.group('h')) # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0). # We will only process correct ratios. if w > 0 and h > 0: ratio = w / h for f in formats: if f.get('vcodec') != 'none': f['stretched_ratio'] = ratio if not formats: if 'reason' in video_info: if 'The uploader has not made this video available in your country.' 
in video_info['reason']: regions_allowed = self._html_search_meta( 'regionsAllowed', video_webpage, default=None) countries = regions_allowed.split(',') if regions_allowed else None self.raise_geo_restricted( msg=video_info['reason'][0], countries=countries) reason = video_info['reason'][0] if 'Invalid parameters' in reason: unavailable_message = extract_unavailable_message() if unavailable_message: reason = unavailable_message raise ExtractorError( 'YouTube said: %s' % reason, expected=True, video_id=video_id) if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']): raise ExtractorError('This video is DRM protected.', expected=True) self._sort_formats(formats) self.mark_watched(video_id, video_info, player_response) return { 'id': video_id, 'uploader': video_uploader, 'uploader_id': video_uploader_id, 'uploader_url': video_uploader_url, 'channel_id': channel_id, 'channel_url': channel_url, 'upload_date': upload_date, 'license': video_license, 'creator': video_creator or artist, 'title': video_title, 'alt_title': video_alt_title or track, 'thumbnail': video_thumbnail, 'description': video_description, 'categories': video_categories, 'tags': video_tags, 'subtitles': video_subtitles, 'automatic_captions': automatic_captions, 'duration': video_duration, 'age_limit': 18 if age_gate else 0, 'annotations': video_annotations, 'chapters': chapters, 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'average_rating': average_rating, 'formats': formats, 'is_live': is_live, 'start_time': start_time, 'end_time': end_time, 'series': series, 'season_number': season_number, 'episode_number': episode_number, 'track': track, 'artist': artist, 'album': album, 'release_date': release_date, 'release_year': release_year, } class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com playlists' _VALID_URL = r"""(?x)(?: (?:https?://)? (?:\w+\.)? (?: (?: youtube(?:kids)?\.com| invidio\.us ) / (?: (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11})) \? (?:.*?[&;])*? (?:p|a|list)= | p/ )| youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist= ) ( (?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,} # Top tracks, they can also include dots |(?:MC)[\w\.]* ) .* | (%(playlist_id)s) )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE} _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s' _VIDEO_RE_TPL = r'href="\s*/watch\?v=%s(?:&amp;(?:[^"]*?index=(?P<index>\d+))?(?:[^>]+>(?P<title>[^<]+))?)?' _VIDEO_RE = _VIDEO_RE_TPL % r'(?P<id>[0-9A-Za-z_-]{11})' IE_NAME = 'youtube:playlist' _TESTS = [{ 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'info_dict': { 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', 'uploader': 'Sergey M.', 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'title': 'youtube-dl public playlist', }, 'playlist_count': 1, }, { 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf', 'info_dict': { 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', 'uploader': 'Sergey M.', 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf', 'title': 'youtube-dl empty playlist', }, 'playlist_count': 0, }, { 'note': 'Playlist with deleted videos (#651). 
As a bonus, the video #51 is also twice in this list.', 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC', 'info_dict': { 'title': '29C3: Not my department', 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC', 'uploader': 'Christiaan008', 'uploader_id': 'ChRiStIaAn008', }, 'playlist_count': 96, }, { 'note': 'issue #673', 'url': 'PLBB231211A4F62143', 'info_dict': { 'title': '[OLD]Team Fortress 2 (Class-based LP)', 'id': 'PLBB231211A4F62143', 'uploader': 'Wickydoo', 'uploader_id': 'Wickydoo', }, 'playlist_mincount': 26, }, { 'note': 'Large playlist', 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q', 'info_dict': { 'title': 'Uploads from Cauchemar', 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q', 'uploader': 'Cauchemar', 'uploader_id': 'Cauchemar89', }, 'playlist_mincount': 799, }, { 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl', 'info_dict': { 'title': 'YDL_safe_search', 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl', }, 'playlist_count': 2, 'skip': 'This playlist is private', }, { 'note': 'embedded', 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu', 'playlist_count': 4, 'info_dict': { 'title': 'JODA15', 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu', 'uploader': 'milan', 'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw', } }, { 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl', 'playlist_mincount': 485, 'info_dict': { 'title': '2018 Chinese New Singles (11/6 updated)', 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl', 'uploader': 'LBK', 'uploader_id': 'sdragonfang', } }, { 'note': 'Embedded SWF player', 'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0', 'playlist_count': 4, 'info_dict': { 'title': 'JODA7', 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ', }, 'skip': 'This playlist does not exist', }, { 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos', 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA', 'info_dict': { 'title': 'Uploads from Interstellar Movie', 'id': 'UUXw-G3eDE9trcvY2sBMM_aA', 'uploader': 'Interstellar Movie', 'uploader_id': 'InterstellarMovie1', }, 'playlist_mincount': 21, }, { # Playlist URL that does not actually serve a playlist 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4', 'info_dict': { 'id': 'FqZTN594JQw', 'ext': 'webm', 'title': "Smiley's People 01 detective, Adventure Series, Action", 'uploader': 'STREEM', 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng', 'upload_date': '20150526', 'license': 'Standard YouTube License', 'description': 'md5:507cdcb5a49ac0da37a920ece610be80', 'categories': ['People & Blogs'], 'tags': list, 'view_count': int, 'like_count': int, 'dislike_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'This video is not available.', 'add_ie': [YoutubeIE.ie_key()], }, { 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5', 'info_dict': { 'id': 'yeWKywCrFtk', 'ext': 'mp4', 'title': 'Small Scale Baler and Braiding Rugs', 'uploader': 'Backus-Page House Museum', 'uploader_id': 'backuspagemuseum', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum', 'upload_date': '20161008', 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a', 'categories': ['Nonprofits & Activism'], 'tags': list, 'like_count': int, 'dislike_count': int, }, 'params': { 'noplaylist': True, 'skip_download': True, }, 
}, { # https://github.com/ytdl-org/youtube-dl/issues/21844 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba', 'info_dict': { 'title': 'Data Analysis with Dr Mike Pound', 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba', 'uploader_id': 'Computerphile', 'uploader': 'Computerphile', }, 'playlist_mincount': 11, }, { 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21', 'only_matching': True, }, { 'url': 'TLGGrESM50VT6acwMjAyMjAxNw', 'only_matching': True, }, { # music album playlist 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM', 'only_matching': True, }, { 'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU', 'only_matching': True, }, { 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g', 'only_matching': True, }] def _real_initialize(self): self._login() def extract_videos_from_page(self, page): ids_in_page = [] titles_in_page = [] for item in re.findall( r'(<[^>]*\bdata-video-id\s*=\s*["\'][0-9A-Za-z_-]{11}[^>]+>)', page): attrs = extract_attributes(item) video_id = attrs['data-video-id'] video_title = unescapeHTML(attrs.get('data-title')) if video_title: video_title = video_title.strip() ids_in_page.append(video_id) titles_in_page.append(video_title) # Fallback with old _VIDEO_RE self.extract_videos_from_page_impl( self._VIDEO_RE, page, ids_in_page, titles_in_page) # Relaxed fallbacks self.extract_videos_from_page_impl( r'href="\s*/watch\?v\s*=\s*(?P<id>[0-9A-Za-z_-]{11})', page, ids_in_page, titles_in_page) self.extract_videos_from_page_impl( r'data-video-ids\s*=\s*["\'](?P<id>[0-9A-Za-z_-]{11})', page, ids_in_page, titles_in_page) return zip(ids_in_page, titles_in_page) def _extract_mix(self, playlist_id): # The mixes are generated from a single video # the id of the playlist is just 'RD' + video_id ids = [] last_id = playlist_id[-11:] for n in itertools.count(1): url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id) webpage = self._download_webpage( url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n)) new_ids = orderedSet(re.findall( r'''(?xs)data-video-username=".*?".*? href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id), webpage)) # Fetch new pages until all the videos are repeated, it seems that # there are always 51 unique videos. new_ids = [_id for _id in new_ids if _id not in ids] if not new_ids: break ids.extend(new_ids) last_id = ids[-1] url_results = self._ids_to_results(ids) search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage) title_span = ( search_title('playlist-title') or search_title('title long-title') or search_title('title')) title = clean_html(title_span) return self.playlist_result(url_results, playlist_id, title) def _extract_playlist(self, playlist_id): url = self._TEMPLATE_URL % playlist_id page = self._download_webpage(url, playlist_id) # the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604) for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page): match = match.strip() # Check if the playlist exists or is private mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match) if mobj: reason = mobj.group('reason') message = 'This playlist %s' % reason if 'private' in reason: message += ', use --username or --netrc to access it' message += '.' 
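                # expected=True marks this as a normal, user-facing error
                # rather than an extractor bug, so no bug-report notice is shown.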
raise ExtractorError(message, expected=True) elif re.match(r'[^<]*Invalid parameters[^<]*', match): raise ExtractorError( 'Invalid parameters. Maybe URL is incorrect.', expected=True) elif re.match(r'[^<]*Choose your language[^<]*', match): continue else: self.report_warning('Youtube gives an alert message: ' + match) playlist_title = self._html_search_regex( r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>', page, 'title', default=None) _UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref=' uploader = self._html_search_regex( r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE, page, 'uploader', default=None) mobj = re.search( r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE, page) if mobj: uploader_id = mobj.group('uploader_id') uploader_url = compat_urlparse.urljoin(url, mobj.group('path')) else: uploader_id = uploader_url = None has_videos = True if not playlist_title: try: # Some playlist URLs don't actually serve a playlist (e.g. # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4) next(self._entries(page, playlist_id)) except StopIteration: has_videos = False playlist = self.playlist_result( self._entries(page, playlist_id), playlist_id, playlist_title) playlist.update({ 'uploader': uploader, 'uploader_id': uploader_id, 'uploader_url': uploader_url, }) return has_videos, playlist def _check_download_just_video(self, url, playlist_id): # Check if it's a video-specific URL query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) video_id = query_dict.get('v', [None])[0] or self._search_regex( r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url, 'video id', default=None) if video_id: if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just video %s because of --no-playlist' % video_id) return video_id, self.url_result(video_id, 'Youtube', video_id=video_id) else: self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id)) return video_id, None return None, None def _real_extract(self, url): # Extract playlist id mobj = re.match(self._VALID_URL, url) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) playlist_id = mobj.group(1) or mobj.group(2) video_id, video = self._check_download_just_video(url, playlist_id) if video: return video if playlist_id.startswith(('RD', 'UL', 'PU')): # Mixes require a custom extraction process return self._extract_mix(playlist_id) has_videos, playlist = self._extract_playlist(playlist_id) if has_videos or not video_id: return playlist # Some playlist URLs don't actually serve a playlist (see # https://github.com/ytdl-org/youtube-dl/issues/10537). # Fallback to plain video extraction if there is a video id # along with playlist id. return self.url_result(video_id, 'Youtube', video_id=video_id) class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com channels' _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie|kids)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)' _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos' _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?' 
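    # The title group in _VIDEO_RE is optional, so channel entries whose
    # markup carries no title attribute still yield a video id.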
IE_NAME = 'youtube:channel' _TESTS = [{ 'note': 'paginated channel', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'playlist_mincount': 91, 'info_dict': { 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w', 'title': 'Uploads from lex will', 'uploader': 'lex will', 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', } }, { 'note': 'Age restricted channel', # from https://www.youtube.com/user/DeusExOfficial 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w', 'playlist_mincount': 64, 'info_dict': { 'id': 'UUs0ifCMCm1icqRbqhUINa0w', 'title': 'Uploads from Deus Ex', 'uploader': 'Deus Ex', 'uploader_id': 'DeusExOfficial', }, }, { 'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA', 'only_matching': True, }, { 'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url) else super(YoutubeChannelIE, cls).suitable(url)) def _build_template_url(self, url, channel_id): return self._TEMPLATE_URL % channel_id def _real_extract(self, url): channel_id = self._match_id(url) url = self._build_template_url(url, channel_id) # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778) # Workaround by extracting as a playlist if managed to obtain channel playlist URL # otherwise fallback on channel by page extraction channel_page = self._download_webpage( url + '?view=57', channel_id, 'Downloading channel page', fatal=False) if channel_page is False: channel_playlist_id = False else: channel_playlist_id = self._html_search_meta( 'channelId', channel_page, 'channel id', default=None) if not channel_playlist_id: channel_url = self._html_search_meta( ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'), channel_page, 'channel url', default=None) if channel_url: channel_playlist_id = self._search_regex( r'vnd\.youtube://user/([0-9A-Za-z_-]+)', channel_url, 'channel id', default=None) if channel_playlist_id and channel_playlist_id.startswith('UC'): playlist_id = 'UU' + channel_playlist_id[2:] return self.url_result( compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist') channel_page = self._download_webpage(url, channel_id, 'Downloading page #1') autogenerated = re.search(r'''(?x) class="[^"]*?(?: channel-header-autogenerated-label| yt-channel-title-autogenerated )[^"]*"''', channel_page) is not None if autogenerated: # The videos are contained in a single page # the ajax pages can't be used, they are empty entries = [ self.url_result( video_id, 'Youtube', video_id=video_id, video_title=video_title) for video_id, video_title in self.extract_videos_from_page(channel_page)] return self.playlist_result(entries, channel_id) try: next(self._entries(channel_page, channel_id)) except StopIteration: alert_message = self._html_search_regex( r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>', channel_page, 'alert', default=None, group='alert') if alert_message: raise ExtractorError('Youtube said: %s' % alert_message, expected=True) return self.playlist_result(self._entries(channel_page, channel_id), channel_id) class YoutubeUserIE(YoutubeChannelIE): IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)' _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)' _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos' 
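    # _TEMPLATE_URL is filled in by _build_template_url below with
    # ('user' or 'c', <name>), yielding e.g. /user/<name>/videos.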
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
            'title': 'Uploads from The Linux Foundation',
            'uploader': 'The Linux Foundation',
            'uploader_id': 'TheLinuxFoundation',
        }
    }, {
        # Only available via https://www.youtube.com/c/12minuteathlete/videos
        # but not https://www.youtube.com/user/12minuteathlete/videos
        'url': 'https://www.youtube.com/c/12minuteathlete/videos',
        'playlist_mincount': 249,
        'info_dict': {
            'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
            'title': 'Uploads from 12 Minute Athlete',
            'uploader': '12 Minute Athlete',
            'uploader_id': 'the12minuteathlete',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/c/gametrailers',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/gametrailers',
        'only_matching': True,
    }, {
        # This channel is not available, geo restricted to JP
        'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor; the regex is too permissive and would match.
        other_yt_ies = iter(
            klass for (name, klass) in globals().items()
            if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_yt_ies):
            return False
        else:
            return super(YoutubeUserIE, cls).suitable(url)

    def _build_template_url(self, url, channel_id):
        mobj = re.match(self._VALID_URL, url)
        return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))


class YoutubeLiveIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com live streams'
    _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
    IE_NAME = 'youtube:live'
    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheYoungTurks/live',
        'info_dict': {
            'id': 'a48o2S1cPoo',
            'ext': 'mp4',
            'title': 'The Young Turks - Live Main Show',
            'uploader': 'The Young Turks',
            'uploader_id': 'TheYoungTurks',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
            'upload_date': '20150715',
            'license': 'Standard YouTube License',
            'description': 'md5:438179573adcdff3c97ebb1ee632b891',
            'categories': ['News & Politics'],
            'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/TheYoungTurks/live',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        base_url = mobj.group('base_url')
        webpage = self._download_webpage(url, channel_id, fatal=False)
        if webpage:
            page_type = self._og_search_property(
                'type', webpage, 'page type', default='')
            video_id = self._html_search_meta(
                'videoId', webpage, 'video id', default=None)
            if page_type.startswith('video') and video_id and re.match(
                    r'^[0-9A-Za-z_-]{11}$', video_id):
                return self.url_result(video_id, YoutubeIE.ie_key())
        return self.url_result(base_url)


class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
    IE_DESC = 'YouTube.com user/channel playlists'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
    IE_NAME = 'youtube:playlists'

    _TESTS = [{
        'url':
'https://www.youtube.com/user/ThirstForScience/playlists', 'playlist_mincount': 4, 'info_dict': { 'id': 'ThirstForScience', 'title': 'ThirstForScience', }, }, { # with "Load more" button 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd', 'playlist_mincount': 70, 'info_dict': { 'id': 'igorkle1', 'title': 'Игорь Клейнер', }, }, { 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists', 'playlist_mincount': 17, 'info_dict': { 'id': 'UCiU1dHvZObB2iP6xkJ__Icw', 'title': 'Chem Player', }, 'skip': 'Blocked', }] class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor): _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?' class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor): IE_DESC = 'YouTube.com searches' # there doesn't appear to be a real limit, for example if you search for # 'python' you get more than 8.000.000 results _MAX_RESULTS = float('inf') IE_NAME = 'youtube:search' _SEARCH_KEY = 'ytsearch' _EXTRA_QUERY_ARGS = {} _TESTS = [] def _get_n_results(self, query, n): """Get a specified number of results for a query""" videos = [] limit = n url_query = { 'search_query': query.encode('utf-8'), } url_query.update(self._EXTRA_QUERY_ARGS) result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query) for pagenum in itertools.count(1): data = self._download_json( result_url, video_id='query "%s"' % query, note='Downloading page %s' % pagenum, errnote='Unable to download API page', query={'spf': 'navigate'}) html_content = data[1]['body']['content'] if 'class="search-message' in html_content: raise ExtractorError( '[youtube] No video results', expected=True) new_videos = list(self._process_page(html_content)) videos += new_videos if not new_videos or len(videos) > limit: break next_link = self._html_search_regex( r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next', html_content, 'next link', default=None) if next_link is None: break result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link) if len(videos) > n: videos = videos[:n] return self.playlist_result(videos, query) class YoutubeSearchDateIE(YoutubeSearchIE): IE_NAME = YoutubeSearchIE.IE_NAME + ':date' _SEARCH_KEY = 'ytsearchdate' IE_DESC = 'YouTube.com searches, newest videos first' _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'} class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor): IE_DESC = 'YouTube.com search URLs' IE_NAME = 'youtube:search_url' _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)' _TESTS = [{ 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', 'playlist_mincount': 5, 'info_dict': { 'title': 'youtube-dl test video', } }, { 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) query = compat_urllib_parse_unquote_plus(mobj.group('query')) webpage = self._download_webpage(url, query) return self.playlist_result(self._process_page(webpage), playlist_title=query) class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor): IE_DESC = 'YouTube.com (multi-season) shows' _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)' IE_NAME = 'youtube:show' _TESTS = [{ 'url': 'https://www.youtube.com/show/airdisasters', 'playlist_mincount': 5, 'info_dict': { 'id': 'airdisasters', 
'title': 'Air Disasters', } }] def _real_extract(self, url): playlist_id = self._match_id(url) return super(YoutubeShowIE, self)._real_extract( 'https://www.youtube.com/show/%s/playlists' % playlist_id) class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor): """ Base class for feed extractors Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties. """ _LOGIN_REQUIRED = True @property def IE_NAME(self): return 'youtube:%s' % self._FEED_NAME def _real_initialize(self): self._login() def _entries(self, page): # The extraction process is the same as for playlists, but the regex # for the video ids doesn't contain an index ids = [] more_widget_html = content_html = page for page_num in itertools.count(1): matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html) # 'recommended' feed has infinite 'load more' and each new portion spins # the same videos in (sometimes) slightly different order, so we'll check # for unicity and break when portion has no new videos new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches))) if not new_ids: break ids.extend(new_ids) for entry in self._ids_to_results(new_ids): yield entry mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) if not mobj: break more = self._download_json( 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE, 'Downloading page #%s' % page_num, transform_source=uppercase_escape) content_html = more['content_html'] more_widget_html = more['load_more_widget_html'] def _real_extract(self, url): page = self._download_webpage( 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE) return self.playlist_result( self._entries(page), playlist_title=self._PLAYLIST_TITLE) class YoutubeWatchLaterIE(YoutubePlaylistIE): IE_NAME = 'youtube:watchlater' IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater' _TESTS = [{ 'url': 'https://www.youtube.com/playlist?list=WL', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL', 'only_matching': True, }] def _real_extract(self, url): _, video = self._check_download_just_video(url, 'WL') if video: return video _, playlist = self._extract_playlist('WL') return playlist class YoutubeFavouritesIE(YoutubeBaseInfoExtractor): IE_NAME = 'youtube:favorites' IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?' _LOGIN_REQUIRED = True def _real_extract(self, url): webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos') playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id') return self.url_result(playlist_id, 'YoutubePlaylist') class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor): IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?' _FEED_NAME = 'recommended' _PLAYLIST_TITLE = 'Youtube Recommended videos' class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor): IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?' 
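    # Like YoutubeRecommendedIE and YoutubeHistoryIE, this class only supplies
    # the feed name and playlist title; the shared pagination logic lives in
    # YoutubeFeedsInfoExtractor._entries.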
_FEED_NAME = 'subscriptions' _PLAYLIST_TITLE = 'Youtube Subscriptions' class YoutubeHistoryIE(YoutubeFeedsInfoExtractor): IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)' _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory' _FEED_NAME = 'history' _PLAYLIST_TITLE = 'Youtube History' class YoutubeTruncatedURLIE(InfoExtractor): IE_NAME = 'youtube:truncated_url' IE_DESC = False # Do not list _VALID_URL = r'''(?x) (?:https?://)? (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/ (?:watch\?(?: feature=[a-z_]+| annotation_id=annotation_[^&]+| x-yt-cl=[0-9]+| hl=[^&]*| t=[0-9]+ )? | attribution_link\?a=[^&]+ ) $ ''' _TESTS = [{ 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?feature=foo', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?hl=en-GB', 'only_matching': True, }, { 'url': 'https://www.youtube.com/watch?t=2372', 'only_matching': True, }] def _real_extract(self, url): raise ExtractorError( 'Did you forget to quote the URL? Remember that & is a meta ' 'character in most shells, so you want to put the URL in quotes, ' 'like youtube-dl ' '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" ' ' or simply youtube-dl BaW_jenozKc .', expected=True) class YoutubeTruncatedIDIE(InfoExtractor): IE_NAME = 'youtube:truncated_id' IE_DESC = False # Do not list _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$' _TESTS = [{ 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) raise ExtractorError( 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url), expected=True)
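# A minimal, illustrative helper (not used by the extractors above) showing
# how YoutubeTruncatedIDIE's pattern distinguishes truncated ids; the first
# URL is the extractor's own test URL, the second is a hypothetical variant
# with an 11th character appended to complete the id.
def _truncated_id_example():
    pattern = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
    truncated = re.match(pattern, 'https://www.youtube.com/watch?v=N_708QY7Ob')  # 10 chars: matches
    complete = re.match(pattern, 'https://www.youtube.com/watch?v=N_708QY7Obw')  # 11 chars: no match
    return truncated is not None and complete is None  # True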
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-14-fixed/youtube-dl/youtube_dl/extractor/youtube.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-14-buggy/youtube-dl/youtube_dl/extractor/youtube.py
youtube-dl-bug-41
#!/usr/bin/env python # -*- coding: utf-8 -*- import ctypes import datetime import email.utils import errno import gzip import itertools import io import json import locale import math import os import pipes import platform import re import ssl import socket import subprocess import sys import traceback import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken def _unquote(string, encoding='utf-8', errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. 
string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = _unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = _unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3,0): def write_json_file(obj, fn): with open(fn, 'wb') as f: json.dump(obj, f) else: def write_json_file(obj, fn): with open(fn, 'w', encoding='utf-8') as f: json.dump(obj, f) if sys.version_info >= (2,7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def htmlentity_transform(matchobj): """Transforms an HTML entity to a character. 
    This function receives a match object and is intended to be used with
    the re.sub() function.
    """
    entity = matchobj.group(1)

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    mobj = re.match(u'(?u)#(x?\\d+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith(u'x'):
            base = 16
            numstr = u'0%s' % numstr
        else:
            base = 10
        return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return (u'&%s;' % entity)

compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE)  # backport bugfix


class BaseHTMLParser(compat_html_parser.HTMLParser):
    def __init__(self):
        compat_html_parser.HTMLParser.__init__(self)
        self.html = None

    def loads(self, html):
        self.html = html
        self.feed(html)
        self.close()


class AttrParser(BaseHTMLParser):
    """Modified HTMLParser that isolates a tag with the specified attribute"""
    def __init__(self, attribute, value):
        self.attribute = attribute
        self.value = value
        self.result = None
        self.started = False
        self.depth = {}
        self.watch_startpos = False
        self.error_count = 0
        BaseHTMLParser.__init__(self)

    def error(self, message):
        if self.error_count > 10 or self.started:
            raise compat_html_parser.HTMLParseError(message, self.getpos())
        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:])  # skip one line
        self.error_count += 1
        self.goahead(1)

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.started:
            self.find_startpos(None)
        if self.attribute in attrs and attrs[self.attribute] == self.value:
            self.result = [tag]
            self.started = True
            self.watch_startpos = True
        if self.started:
            if tag not in self.depth:
                self.depth[tag] = 0
            self.depth[tag] += 1

    def handle_endtag(self, tag):
        if self.started:
            if tag in self.depth:
                self.depth[tag] -= 1
            if self.depth[self.result[0]] == 0:
                self.started = False
                self.result.append(self.getpos())

    def find_startpos(self, x):
        """Needed to put the start position of the result (self.result[1])
        after the opening tag with the requested id"""
        if self.watch_startpos:
            self.watch_startpos = False
            self.result.append(self.getpos())
    handle_entityref = handle_charref = handle_data = handle_comment = \
        handle_decl = handle_pi = unknown_decl = find_startpos

    def get_result(self):
        if self.result is None:
            return None
        if len(self.result) != 3:
            return None
        lines = self.html.split('\n')
        lines = lines[self.result[1][0]-1:self.result[2][0]]
        lines[0] = lines[0][self.result[1][1]:]
        if len(lines) == 1:
            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
        lines[-1] = lines[-1][:self.result[2][1]]
        return '\n'.join(lines).strip()

# Hack for https://github.com/rg3/youtube-dl/issues/662
if sys.version_info < (2, 7, 3):
    AttrParser.parse_endtag = (lambda self, i:
                               i + len("</scr'+'ipt>")
                               if self.rawdata[i:].startswith("</scr'+'ipt>")
                               else compat_html_parser.HTMLParser.parse_endtag(self, i))


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute("id", id, html)


def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    parser = AttrParser(attribute, value)
    try:
        parser.loads(html)
    except compat_html_parser.HTMLParseError:
        pass
    return parser.get_result()


class MetaParser(BaseHTMLParser):
    """ Modified HTMLParser that
isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def unescapeHTML(s): """ @param s a string """ assert type(s) == type(u'') result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s) return result def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate, **kwargs): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3(**kwargs) else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv3) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. 
""" if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h,v in std_headers.items(): if h in req.headers: del req.headers[h] req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" upload_date = None #Replace commas date_str = date_str.replace(',',' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' ?(\+|-)[0-9:]*$', '', date_str) format_expressions = [ '%d %B %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%d.%m.%Y %H:%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M', ] for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext=u'unknown_video'): guess = url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now'or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') #A bad aproximation? 
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day,day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def write_string(s, out=None): if out is None: out = sys.stderr assert type(s) == compat_str if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr s = s.encode(preferredencoding(), 'ignore') try: out.write(s) except UnicodeEncodeError: # In Windows shells, this can fail even when the codec is just charmap!? 
# See https://wiki.python.org/moin/PrintFails#Issue if sys.platform == 'win32' and hasattr(out, 'encoding'): s = s.encode(out.encoding, 'ignore').decode(out.encoding) out.write(s) else: raise out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) def get_cachedir(params={}): cache_root = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.lockf(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but include the latest evaluated element (the first element so that Not pred(e)) """ for e in seq: 
yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url, default=None): if not '#__youtubedl_smuggle' in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def str_to_int(int_str): int_str = re.sub(r'[,\.]', u'', int_str) return int(int_str) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', u'&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title = title buf = ctypes.create_string_buffer(len(title) + 1) buf.value = title.encode('utf-8') try: libc.prctl(15, ctypes.byref(buf), 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip(u'/').split(u'/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1): return v if v is None else (int(v) // scale) def parse_duration(s): if s is None: return None m = re.match( r'(?:(?:(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)$', s) if not m: return None res = int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return u'{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe class PagedList(object): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def __len__(self): # This is only useful for tests return len(self.getslice()) def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res def uppercase_escape(s): return re.sub( r'\\U([0-9a-fA-F]{8})', lambda m: compat_chr(int(m.group(1), base=16)), s) #!/usr/bin/env python # -*- coding: utf-8 -*- import ctypes import datetime import email.utils import errno import gzip import itertools import io import json import locale import math import os import pipes import platform import re import ssl import socket import subprocess import sys import traceback import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. 
# Python 2's version is apparently totally broken def _unquote(string, encoding='utf-8', errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = _unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = _unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) # In Python 2.x, json.dump expects a bytestream. 
# In Python 3.x, it writes to a character stream if sys.version_info < (3,0): def write_json_file(obj, fn): with open(fn, 'wb') as f: json.dump(obj, f) else: def write_json_file(obj, fn): with open(fn, 'w', encoding='utf-8') as f: json.dump(obj, f) if sys.version_info >= (2,7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def htmlentity_transform(matchobj): """Transforms an HTML entity to a character. This function receives a match object and is intended to be used with the re.sub() function. """ entity = matchobj.group(1) # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(u'(?u)#(x?\\d+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith(u'x'): base = 16 numstr = u'0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return (u'&%s;' % entity) compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class BaseHTMLParser(compat_html_parser.HTMLParser): def __init__(self): compat_html_parser.HTMLParser.__init__(self) self.html = None def loads(self, html): self.html = html self.feed(html) self.close() class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute self.value = value self.result = None self.started = False self.depth = {} self.watch_startpos = False self.error_count = 0 BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1 self.goahead(1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value: self.result = [tag] self.started = True self.watch_startpos = True if self.started: if tag not in self.depth: self.depth[tag] = 0 self.depth[tag] += 1 def handle_endtag(self, tag): if self.started: if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0: self.started = False self.result.append(self.getpos()) def find_startpos(self, x): """Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id""" if self.watch_startpos: self.watch_startpos = False self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos def get_result(self): if self.result is None: return None if len(self.result) != 3:
return None lines = self.html.split('\n') lines = lines[self.result[1][0]-1:self.result[2][0]] lines[0] = lines[0][self.result[1][1]:] if len(lines) == 1: lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] lines[-1] = lines[-1][:self.result[2][1]] return '\n'.join(lines).strip() # Hack for https://github.com/rg3/youtube-dl/issues/662 if sys.version_info < (2, 7, 3): AttrParser.parse_endtag = (lambda self, i: i + len("</scr'+'ipt>") if self.rawdata[i:].startswith("</scr'+'ipt>") else compat_html_parser.HTMLParser.parse_endtag(self, i)) def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" parser = AttrParser(attribute, value) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() class MetaParser(BaseHTMLParser): """ Modified HTMLParser that isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. 
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def unescapeHTML(s): """ @param s a string """ assert type(s) == type(u'') result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s) return result def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate, **kwargs): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3(**kwargs) else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv3) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None): """ tb, if given, is the original traceback (so that it can be printed out). 
If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h,v in std_headers.items(): if h in req.headers: del req.headers[h] req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" upload_date = None #Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) format_expressions = [ '%d %B %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%d.%m.%Y %H:%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M', ] for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext=u'unknown_video'): guess = url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now'or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') #A bad aproximation? 
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day,day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def write_string(s, out=None): if out is None: out = sys.stderr assert type(s) == compat_str if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr s = s.encode(preferredencoding(), 'ignore') try: out.write(s) except UnicodeEncodeError: # In Windows shells, this can fail even when the codec is just charmap!? 
# See https://wiki.python.org/moin/PrintFails#Issue if sys.platform == 'win32' and hasattr(out, 'encoding'): s = s.encode(out.encoding, 'ignore').decode(out.encoding) out.write(s) else: raise out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) def get_cachedir(params={}): cache_root = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.lockf(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but include the latest evaluated element (the first element so that Not pred(e)) """ for e in seq: 
yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url, default=None): if not '#__youtubedl_smuggle' in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def str_to_int(int_str): int_str = re.sub(r'[,\.]', u'', int_str) return int(int_str) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', u'&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title = title buf = ctypes.create_string_buffer(len(title) + 1) buf.value = title.encode('utf-8') try: libc.prctl(15, ctypes.byref(buf), 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip(u'/').split(u'/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1): return v if v is None else (int(v) // scale) def parse_duration(s): if s is None: return None m = re.match( r'(?:(?:(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)$', s) if not m: return None res = int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return u'{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe class PagedList(object): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def __len__(self): # This is only useful for tests return len(self.getslice()) def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res def uppercase_escape(s): return re.sub( r'\\U([0-9a-fA-F]{8})', lambda m: compat_chr(int(m.group(1), base=16)), s)
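# A minimal usage sketch of the helpers defined above (illustrative only: the
# sample URLs, filenames and values below are made-up assumptions, not data
# from youtube-dl). It exercises a few of the pure functions so their
# contracts are visible at a glance, and it only runs when this module is
# executed directly.
if __name__ == '__main__':
    # compat_parse_qs splits repeated keys into lists and percent-decodes values
    assert compat_parse_qs('a=b&a=c%20d') == {'a': ['b', 'c d']}

    # sanitize_filename replaces characters that are forbidden in file names
    # ('/' -> '_', ':' -> ' -' in the default, non-restricted mode)
    assert sanitize_filename(u'AC/DC: Back in Black') == u'AC_DC - Back in Black'

    # smuggle_url/unsmuggle_url round-trip extra data through the URL fragment
    url = smuggle_url(u'http://example.com/watch?v=x', {u'referrer': u'test'})
    plain_url, data = unsmuggle_url(url)
    assert plain_url == u'http://example.com/watch?v=x'
    assert data == {u'referrer': u'test'}

    # parse_duration accepts [[HH:]MM:]SS strings and returns seconds
    assert parse_duration(u'1:02:03') == 3723

    # format_bytes uses binary (1024-based) suffixes
    assert format_bytes(1536) == u'1.50KiB'

    # DateRange supports containment checks on YYYYMMDD strings
    assert u'20120115' in DateRange(u'20120101', u'20120131')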
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-41-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-41-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-9
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import collections import contextlib import datetime import errno import fileinput import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import sys import time import tokenize import traceback if os.name == 'nt': import ctypes from .compat import ( compat_basestring, compat_cookiejar, compat_expanduser, compat_get_terminal_size, compat_http_client, compat_kwargs, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, ) from .utils import ( escape_url, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, DownloadError, encodeFilename, ExtractorError, format_bytes, formatSeconds, HEADRequest, locked_file, make_HTTPS_handler, MaxDownloadsReached, PagedList, parse_filesize, PerRequestProxyHandler, PostProcessingError, platform_name, preferredencoding, render_table, SameFileError, sanitize_filename, sanitize_path, std_headers, subtitles_filename, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLHandler, prepend_extension, replace_extension, args_to_str, age_restricted, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractors from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. As, given a video URL, the downloader doesn't know how to extract all the needed information, task that InfoExtractors do, it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object handles it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL process the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. usenetrc: Use netrc for authentication instead. verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. 
dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatic subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use cn_verification_proxy: URL of the proxy to use for IP address verification on Chinese sites. (Experimental) socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fribidi debug_printtraffic:Print out sent and received HTTP traffic include_ads: Download ads as well default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Do not resolve URLs, return the immediate result. Pass in 'in_playlist' to only show this behavior for playlist items.
postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See youtube_dl/postprocessor/__init__.py for a list. as well as any further keyword arguments for the postprocessor. progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. If status is one of "downloading", or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. * eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. merge_output_format: Extension to use when merging formats. fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: (Experimental) Client-side IP address to bind to. call_home: Boolean, true iff we are allowed to contact the youtube-dl servers for debugging. sleep_interval: Number of seconds to sleep before each download. listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called with the info_dict of every video. If it returns a message, the video is ignored. If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv. The following parameters are not used by YoutubeDL itself, they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args. The following options are used by the post processors: prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, otherwise prefer avconv. 
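    For illustration, a typical options dictionary combining several of the
    settings above (the values are examples adapted from the project README,
    not defaults):

        def hook(d):
            # progress_hooks entry: 'status' is always present
            if d['status'] == 'finished':
                print('Done downloading, now converting ...')

        ydl = YoutubeDL({
            'format': 'bestaudio/best',
            'outtmpl': '%(title)s-%(id)s.%(ext)s',
            'progress_hooks': [hook],
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }],
        })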
""" params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = params self.cache = Cache(self) if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == 2: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.version_info >= (3,) and sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # On Python 3, the Unicode filesystem API will throw errors (#1474) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractors(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 autonumber_templ = '%0' + str(autonumber_size) + 'd' template_dict['autonumber'] = autonumber_templ % self._num_downloads if template_dict.get('playlist_index') is not None: template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index']) if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '?x%d' % template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id')) template_dict = dict((k, sanitize(k, v)) for k, v in template_dict.items() if v is not None) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL)) tmpl = compat_expanduser(outtmpl) filename = tmpl % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = 
encodeFilename(filename, True).decode(preferredencoding()) return filename except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date', None) if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count', None) if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except ExtractorError as de: # An error we somewhat expected self.report_error(compat_str(de), de.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) assert new_result.get('_type') != 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type == 'playlist' or result_type == 'multi_video': # We process each entry in the playlist playlist = ie_result.get('title', None) or ie_result.get('id', None) self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend', None) # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items', None) playlistitems = None if playlistitems_str is not None: def 
iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results return ie_result elif result_type == 'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) 
$ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? \s*(?P<value>[a-zA-Z0-9_-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _parse_format_selection(tokens, endwith=[]): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string in endwith: break elif string == ')': # ')' will be handled by the parentheses group tokens.restore_last_token() break if string == ',': selectors.append(current_selector) current_selector = None elif string == '/': first_choice = current_selector second_choice = _parse_format_selection(tokens, [',']) current_selector = None selectors.append(FormatSelector(PICKFIRST, (first_choice, second_choice), [])) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise syntax_error('Unexpected "("', start) current_selector = FormatSelector(GROUP, _parse_format_selection(tokens, [')']), []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, [',']) current_selector = None selectors.append(FormatSelector(MERGE, (video_selector, audio_selector), [])) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(formats): for f in fs: for format in f(formats): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == 
PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(formats): for f in fs: picked_formats = list(f(formats)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(formats): if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format elif (all(f.get('acodec') != 'none' for f in formats) or all(f.get('vcodec') != 'none' for f in formats)): yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(formats): formats = list(formats) for pair in itertools.product(video_selector(formats), audio_selector(formats)): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(formats): for _filter in filters: formats = list(filter(_filter, formats)) return selector_function(formats) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = list(compat_tokenize_tokenize(stream.readline)) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens 
self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies return res def _calc_cookies(self, info_dict): pr = compat_urllib_request.Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference'), t.get('width'), t.get('height'), t.get('id'), t.get('url'))) for i, t in enumerate(thumbnails): if 'width' in t and 'height' in t: t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if thumbnails and 'thumbnail' not in info_dict: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. 
negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], info_dict.get('subtitles'), info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): if 'url' not in format: raise ExtractorError('Missing "url" key in result (index %d)' % i) if format.get('format_id') is None: format['format_id'] = compat_str(i) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if 'ext' not in format: format['ext'] = determine_ext(format['url']).lower() # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' field if the original info_dict lists them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # which can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and info_dict['extractor'] in ['youtube', 'ted']): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) formats_to_download = list(format_selector(formats)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best 
quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
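        # For reference (illustrative, not exhaustive): the 'format' specs parsed
        # by build_format_selector() in process_video_result() above include
        #     'best'                             (single best audio+video format)
        #     '22/17/18'                         (first available of these format ids)
        #     'bestvideo[height<=720]+bestaudio' (filtered video merged with audio)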
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None: sub_data = sub_info['data'] else: try: sub_data = ie._download_webpage( sub_info['url'], info_dict['id'], note=False) except ExtractorError as err: self.report_warning('Unable to download subtitle for "%s": %s' % (sub_lang, compat_str(err.cause))) continue try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format)) else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: subfile.write(sub_data) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return if self.params.get('writeinfojson', False): infofn = replace_extension(filename, 'info.json', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): self.to_screen('[info] Video description metadata is already present') else: self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(self.filter_requested_info(info_dict), infofn) except (OSError, IOError): self.report_error('Cannot write metadata to JSON file ' + infofn) return self._write_thumbnails(info_dict, filename) if not self.params.get('skip_download', False): try: def dl(name, info): fd = get_suitable_downloader(info, self.params)(self, self.params) for ph in self._progress_hooks: fd.add_progress_hook(ph) if self.params.get('verbose'): self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) return fd.download(name, info) if info_dict.get('requested_formats') is not None: downloaded = [] success = True merger = FFmpegMergerPP(self) if not merger.available: postprocessors = [] self.report_warning('You have requested multiple ' 'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.') else: postprocessors = [merger] def compatible_formats(formats): video, audio = formats # Check extension video_ext, audio_ext = video.get('ext'), audio.get('ext') if video_ext and audio_ext: COMPATIBLE_EXTS = ( ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v'), ('webm',) ) for exts in COMPATIBLE_EXTS: if video_ext in exts and audio_ext in exts: return True # TODO: Check acodec/vcodec return False filename_real_ext = os.path.splitext(filename)[1][1:] filename_wo_ext = ( os.path.splitext(filename)[0] if filename_real_ext == info_dict['ext'] else filename) requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') # Ensure filename always has a correct extension for successful merge filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' 'merged' % filename) else: for f in requested_formats: new_info = dict(info_dict) new_info.update(f) fname = self.prepare_filename(new_info) fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext']) downloaded.append(fname) partial_success = dl(fname, new_info) success = success and partial_success info_dict['__postprocessors'] = postprocessors info_dict['__files_to_merge'] = downloaded else: # Just a single file success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_error('unable to download video data: %s' % str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success: # Fixup content fixup_policy = self.params.get('fixup') if fixup_policy is None: fixup_policy = 'detect_or_warn' stretched_ratio = info_dict.get('stretched_ratio') if stretched_ratio is not None and stretched_ratio != 1: if fixup_policy == 'warn': self.report_warning('%s: Non-uniform pixel ratio (%s)' % ( info_dict['id'], stretched_ratio)) elif fixup_policy == 'detect_or_warn': stretched_pp = FFmpegFixupStretchedPP(self) if stretched_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(stretched_pp) else: self.report_warning( '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % ( info_dict['id'], stretched_ratio)) else: assert fixup_policy in ('ignore', 'never') if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash': if fixup_policy == 'warn': self.report_warning('%s: writing DASH m4a. Only some players support this container.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM4aPP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.'
% ( info_dict['id'])) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, 
default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '?x%d' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: res += ', %sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: tn_url = info_dict.get('thumbnail') if tn_url: thumbnails = [{'id': '0', 'url': tn_url}] else: self.to_screen( '[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( ['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one req_is_string = isinstance(req, 
compat_basestring) url = req if req_is_string else req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: if req_is_string: req = url_escaped else: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = compat_urllib_request.HTTPCookieProcessor( self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' 
% (info_dict['extractor'], info_dict['id'], thumb_display_id) try: uf = self.urlopen(t['url']) with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], compat_str(err))) #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import collections import contextlib import datetime import errno import fileinput import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import sys import time import tokenize import traceback if os.name == 'nt': import ctypes from .compat import ( compat_basestring, compat_cookiejar, compat_expanduser, compat_get_terminal_size, compat_http_client, compat_kwargs, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, ) from .utils import ( escape_url, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, DownloadError, encodeFilename, ExtractorError, format_bytes, formatSeconds, HEADRequest, locked_file, make_HTTPS_handler, MaxDownloadsReached, PagedList, parse_filesize, PerRequestProxyHandler, PostProcessingError, platform_name, preferredencoding, render_table, SameFileError, sanitize_filename, sanitize_path, std_headers, subtitles_filename, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLHandler, prepend_extension, replace_extension, args_to_str, age_restricted, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractors from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible for downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. Given a video URL, the downloader doesn't know how to extract all the needed information (that is the InfoExtractors' task), so it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object hands it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL processes the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatic subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use cn_verification_proxy: URL of the proxy to use for IP address verification on Chinese sites. 
(Experimental) socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fribidi debug_printtraffic:Print out sent and received HTTP traffic include_ads: Download ads as well default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Do not resolve URLs, return the immediate result. Pass in 'in_playlist' to only show this behavior for playlist items. postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See youtube_dl/postprocessor/__init__.py for a list. as well as any further keyword arguments for the postprocessor. progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. If status is one of "downloading", or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. * eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. merge_output_format: Extension to use when merging formats. fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: (Experimental) Client-side IP address to bind to. call_home: Boolean, true iff we are allowed to contact the youtube-dl servers for debugging. sleep_interval: Number of seconds to sleep before each download. listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called with the info_dict of every video. If it returns a message, the video is ignored. If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv. The following parameters are not used by YoutubeDL itself, they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args. The following options are used by the post processors: prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, otherwise prefer avconv.
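A minimal usage sketch (illustrative only: the URL, output template, format string and progress hook below are arbitrary examples, not defaults; any option documented above can be passed the same way):

    from youtube_dl import YoutubeDL

    def progress_hook(d):
        # 'status' is always present in the progress dictionary (see above)
        if d['status'] == 'finished':
            print('Downloaded %s' % d['filename'])

    ydl_opts = {
        'format': 'bestvideo[height<=720]+bestaudio/best',
        'outtmpl': '%(title)s-%(id)s.%(ext)s',
        'progress_hooks': [progress_hook],
    }
    # The context manager saves/restores the console title and
    # persists cookies on exit (see __enter__/__exit__ below).
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download(['http://www.example.com/watch?v=xxxxxxxx'])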
""" params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = params self.cache = Cache(self) if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == 2: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.version_info >= (3,) and sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # On Python 3, the Unicode filesystem API will throw errors (#1474) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractors(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 autonumber_templ = '%0' + str(autonumber_size) + 'd' template_dict['autonumber'] = autonumber_templ % self._num_downloads if template_dict.get('playlist_index') is not None: template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index']) if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '?x%d' % template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id')) template_dict = dict((k, sanitize(k, v)) for k, v in template_dict.items() if v is not None) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL)) tmpl = compat_expanduser(outtmpl) filename = tmpl % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = 
encodeFilename(filename, True).decode(preferredencoding()) return filename except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date', None) if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count', None) if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except ExtractorError as de: # An error we somewhat expected self.report_error(compat_str(de), de.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) assert new_result.get('_type') != 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type == 'playlist' or result_type == 'multi_video': # We process each entry in the playlist playlist = ie_result.get('title', None) or ie_result.get('id', None) self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend', None) # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items', None) playlistitems = None if playlistitems_str is not None: def 
iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results return ie_result elif result_type == 'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) 
$ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? \s*(?P<value>[a-zA-Z0-9_-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': selectors.append(current_selector) current_selector = None elif string == '/': first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = None selectors.append(FormatSelector(PICKFIRST, (first_choice, second_choice), [])) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def 
selector_function(formats): for f in fs: for format in f(formats): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(formats): for f in fs: picked_formats = list(f(formats)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(formats): if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format elif (all(f.get('acodec') != 'none' for f in formats) or all(f.get('vcodec') != 'none' for f in formats)): yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(formats): formats = list(formats) for pair in itertools.product(video_selector(formats), audio_selector(formats)): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(formats): for _filter in filters: formats = list(filter(_filter, formats)) return selector_function(formats) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = 
list(compat_tokenize_tokenize(stream.readline)) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies return res def _calc_cookies(self, info_dict): pr = compat_urllib_request.Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference'), t.get('width'), t.get('height'), t.get('id'), t.get('url'))) for i, t in enumerate(thumbnails): if 'width' in t and 'height' in t: t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if thumbnails and 'thumbnail' not in info_dict: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. 
negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], info_dict.get('subtitles'), info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): if 'url' not in format: raise ExtractorError('Missing "url" key in result (index %d)' % i) if format.get('format_id') is None: format['format_id'] = compat_str(i) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if 'ext' not in format: format['ext'] = determine_ext(format['url']).lower() # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' field if the original info_dict lists them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # which can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and info_dict['extractor'] in ['youtube', 'ted']): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) formats_to_download = list(format_selector(formats)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best
quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
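# Note: 'fulltitle' keeps the untruncated title for the forced printings below,
# while the working title is capped at 200 characters so generated filenames
# stay within common filesystem name limits (usually 255 bytes).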
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None: sub_data = sub_info['data'] else: try: sub_data = ie._download_webpage( sub_info['url'], info_dict['id'], note=False) except ExtractorError as err: self.report_warning('Unable to download subtitle for "%s": %s' % (sub_lang, compat_str(err.cause))) continue try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format)) else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: subfile.write(sub_data) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return if self.params.get('writeinfojson', False): infofn = replace_extension(filename, 'info.json', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): self.to_screen('[info] Video description metadata is already present') else: self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(self.filter_requested_info(info_dict), infofn) except (OSError, IOError): self.report_error('Cannot write metadata to JSON file ' + infofn) return self._write_thumbnails(info_dict, filename) if not self.params.get('skip_download', False): try: def dl(name, info): fd = get_suitable_downloader(info, self.params)(self, self.params) for ph in self._progress_hooks: fd.add_progress_hook(ph) if self.params.get('verbose'): self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) return fd.download(name, info) if info_dict.get('requested_formats') is not None: downloaded = [] success = True merger = FFmpegMergerPP(self) if not merger.available: postprocessors = [] self.report_warning('You have requested multiple ' 'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.') else: postprocessors = [merger] def compatible_formats(formats): video, audio = formats # Check extension: take each stream's own ext (video from video, audio from audio) video_ext, audio_ext = video.get('ext'), audio.get('ext') if video_ext and audio_ext: COMPATIBLE_EXTS = ( ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v'), ('webm',) # one-element tuple, not a bare string ) for exts in COMPATIBLE_EXTS: if video_ext in exts and audio_ext in exts: return True # TODO: Check acodec/vcodec return False filename_real_ext = os.path.splitext(filename)[1][1:] filename_wo_ext = ( os.path.splitext(filename)[0] if filename_real_ext == info_dict['ext'] else filename) requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') # Ensure filename always has a correct extension for successful merge filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' 'merged' % filename) else: for f in requested_formats: new_info = dict(info_dict) new_info.update(f) fname = self.prepare_filename(new_info) fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext']) downloaded.append(fname) partial_success = dl(fname, new_info) success = success and partial_success info_dict['__postprocessors'] = postprocessors info_dict['__files_to_merge'] = downloaded else: # Just a single file success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_error('unable to download video data: %s' % str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success: # Fixup content fixup_policy = self.params.get('fixup') if fixup_policy is None: fixup_policy = 'detect_or_warn' stretched_ratio = info_dict.get('stretched_ratio') if stretched_ratio is not None and stretched_ratio != 1: if fixup_policy == 'warn': self.report_warning('%s: Non-uniform pixel ratio (%s)' % ( info_dict['id'], stretched_ratio)) elif fixup_policy == 'detect_or_warn': stretched_pp = FFmpegFixupStretchedPP(self) if stretched_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(stretched_pp) else: self.report_warning( '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % ( info_dict['id'], stretched_ratio)) else: assert fixup_policy in ('ignore', 'never') if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash': if fixup_policy == 'warn': self.report_warning('%s: writing DASH m4a. Only some players support this container.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM4aPP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.'
% ( info_dict['id'])) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n')
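# Download-archive sketch (the id below is illustrative): each line of the
# archive file stores '<lowercased extractor key> <video id>', e.g.
#   youtube dQw4w9WgXcQ
# which is exactly what _make_archive_id() builds, what record_download_archive()
# appends, and what in_download_archive() compares stripped lines against.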
default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '?x%d' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: res += ', %sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: tn_url = info_dict.get('thumbnail') if tn_url: thumbnails = [{'id': '0', 'url': tn_url}] else: self.to_screen( '[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( ['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one req_is_string = isinstance(req, 
compat_basestring) url = req if req_is_string else req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: if req_is_string: req = url_escaped else: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = compat_urllib_request.HTTPCookieProcessor( self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], compat_str(err)))
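
# A minimal embedding sketch (not from the original source), following the
# pattern documented in youtube-dl's README; the URL below is a placeholder.
if __name__ == '__main__':
    ydl_opts = {
        # Request separate video+audio streams so the merge logic above is
        # exercised, falling back to the best single file.
        'format': 'bestvideo+bestaudio/best',
        'outtmpl': '%(title)s-%(id)s.%(ext)s',
    }
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://example.com/some-video'])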
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. 
""" try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def 
get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() parser.feed(html_element) parser.close() return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. 
http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: m = re.search( r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str): date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
""" sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): 
return 'HEAD' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v 
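
# A small illustrative sketch (not from the original source) of the two helpers
# just defined; the _info dict is made up for demonstration.
if __name__ == '__main__':
    _info = {'formats': [{'url': 'http://example.com/v.mp4'}],
             'like_count': None, 'view_count': 0, 'comment_count': 3}
    # try_get swallows AttributeError/KeyError/TypeError/IndexError raised by
    # the getter, and checks the result against expected_type when given
    assert try_get(_info, lambda x: x['formats'][0]['url'], compat_str) == 'http://example.com/v.mp4'
    assert try_get(_info, lambda x: x['formats'][5]['url']) is None
    # dict_get returns the first candidate key that is present, non-None and
    # (with the default skip_false_values=True) truthy
    assert dict_get(_info, ('like_count', 'view_count', 'comment_count')) == 3
    assert dict_get(_info, ('like_count', 'view_count'), skip_false_values=False) == 0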
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


def parse_age_limit(s):
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s)


def strip_jsonp(code):
    return re.sub(
        r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)


def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v == ',':
            return ""

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        INTEGER_TABLE = (
            (r'^0[xX][0-9a-fA-F]+', 16),
            (r'^0+[0-7]+', 8),
        )

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(0), base)
                return '"%d":' % i if v.endswith(':') else '%d' % i

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        /\*.*?\*/|,(?=\s*[\]}])|
        [a-zA-Z_][.a-zA-Z_0-9]*|
        (?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?|
        [0-9]+(?=\s*:)
        ''', fix_kv, code)


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str


def mimetype2ext(mt):
    if mt is None:
        return None

    ext = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'srt': 'srt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'vtt': 'vtt',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
    }.get(res, res)


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)


def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme


def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
    format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
    return '\n'.join(format_str % tuple(row) for row in table)


def _match_one(filter_part, dct):
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('strval') is not None:
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!'
                    % m.group('op'))
            comparison_value = m.group('strval')
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        actual_value = dct.get(m.group('key'))
        if actual_value is None:
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    UNARY_OPERATORS = {
        '': lambda v: v is not None,
        '!': lambda v: v is None,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)


def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or False """

    return all(
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))


def match_filter_func(filter_str):
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)


def dfxp2srt(dfxp_data):
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
        'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
    })

    class TTMLPElementParser(object):
        out = ''

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                self.out += '\n'

        def end(self, tag):
            pass

        def data(self, data):
            self.out += data

        def close(self):
            return self.out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)


def cli_option(params, command_option, param):
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param,
                    true_value='true', false_value='false', separator=None):
    param = 
params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 
'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto 
Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. 
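    (Equivalently, an illustrative reading of the code below: the data bytes
    are reversed, interpreted as one big hex integer m, and the result is
    pow(m, e, N) formatted as a lowercase hex string.)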
See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search( r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)", code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = 
dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is 
None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() parser.feed(html_element) parser.close() return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). 
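    (Illustrative example: a request to open 'foo?bar.mp4' for writing on
    Windows fails on the '?', so the function retries with the sanitized
    name 'foo#bar.mp4'.)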
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. 
        # http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
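        # Illustrative (hypothetical URL): a redirect Location of
        # 'http://example.com/fü' is re-encoded by the code below and
        # forwarded as 'http://example.com/f%C3%BC'.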
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
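        # (Illustrative: a response header such as
        # "Set-Cookie: name=caf\xc3\xa9" is the kind of non-ASCII value that
        # made Python 2 choke on the next request.)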
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: m = re.search( r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str): date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use.
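    (Illustrative: smuggle_url('http://example.com/v', {'ie': 'generic'}) yields
    'http://example.com/v#__youtubedl_smuggle=...', which unsmuggle_url() below
    splits off and JSON-decodes again.)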
""" sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): 
return 'HEAD' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v 
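# --- Editor's note (not in the original source): a minimal usage sketch for
# the dict_get/try_get helpers defined above. The sample metadata dict is
# invented for illustration; both helpers exist to make extractor code
# tolerant of missing or null fields.
if __name__ == '__main__':
    meta = {'title': None, 'fulltitle': 'Example video', 'entries': [{'id': 'x1'}]}
    # dict_get walks the candidate keys and, by default, skips falsy values
    assert dict_get(meta, ('title', 'fulltitle')) == 'Example video'
    # try_get swallows the usual lookup errors instead of raising them
    assert try_get(meta, lambda x: x['entries'][0]['id'], compat_str) == 'x1'
    assert try_get(meta, lambda x: x['entries'][5]['id']) is None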
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def parse_age_limit(s): if s is None: return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) return int(m.group('age')) if m else US_RATINGS.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) INTEGER_TABLE = ( (r'^0[xX][0-9a-fA-F]+', 16), (r'^0+[0-7]+', 8), ) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(0), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| /\*.*?\*/|,(?=\s*[\]}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?| [0-9]+(?=\s*:) ''', fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', }.get(res, res) def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. """ BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' 
% m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = 
params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 
'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto 
Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. 
See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search( r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)", code) obfuscated_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfuscated_code)
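# --- Editor's note (not in the original source): a self-contained sketch of
# how encode_base_n and decode_packed_codes above fit together. The packed
# string below is an invented minimal sample in the p.a.c.k.e.r layout, not
# taken from any real site.
if __name__ == '__main__':
    # 255 in base 16 using the default digit table
    assert encode_base_n(255, 16) == 'ff'
    # decode_packed_codes maps each base-N token back to its original symbol
    packed = "}('0 1',2,2,'hello|world'.split('|')"
    assert decode_packed_codes(packed) == 'hello world'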
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-26-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-26-buggy/youtube-dl/youtube_dl/utils.py
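Editor's note: the utils.py content in the record above also defines js_to_json, which coerces JavaScript object literals into strict JSON before json.loads. A minimal sketch of the expected behaviour, with invented sample literals (only the function as defined above is assumed):

    import json
    from youtube_dl.utils import js_to_json

    # bare keys, single quotes and trailing commas are all normalised
    assert js_to_json("{'abc': true,}") == '{"abc": true}'
    assert json.loads(js_to_json('{x: 1, y: "2"}')) == {'x': 1, 'y': '2'}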
youtube-dl-bug-33
from __future__ import unicode_literals import re from .subtitles import SubtitlesInfoExtractor from .common import ExtractorError from ..utils import parse_iso8601 class DRTVIE(SubtitlesInfoExtractor): _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)' _TEST = { 'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8', 'md5': '4a7e1dd65cdb2643500a3f753c942f25', 'info_dict': { 'id': 'partiets-mand-7-8', 'ext': 'mp4', 'title': 'Partiets mand (7:8)', 'description': 'md5:a684b90a8f9336cd4aab94b7647d7862', 'timestamp': 1403047940, 'upload_date': '20140617', 'duration': 1299.040, }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') programcard = self._download_json( 'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON') data = programcard['Data'][0] title = data['Title'] description = data['Description'] timestamp = parse_iso8601(data['CreatedTime'][:-5]) thumbnail = None duration = None restricted_to_denmark = False formats = [] subtitles = {} for asset in data['Assets']: if asset['Kind'] == 'Image': thumbnail = asset['Uri'] elif asset['Kind'] == 'VideoResource': duration = asset['DurationInMilliseconds'] / 1000.0 restricted_to_denmark = asset['RestrictedToDenmark'] for link in asset['Links']: target = link['Target'] uri = link['Uri'] formats.append({ 'url': uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43' if target == 'HDS' else uri, 'format_id': target, 'ext': link['FileFormat'], 'preference': -1 if target == 'HDS' else -2, }) subtitles_list = asset.get('SubtitlesList') if isinstance(subtitles_list, list): LANGS = { 'Danish': 'dk', } for subs in subtitles_list: lang = subs['Language'] subtitles[LANGS.get(lang, lang)] = subs['Uri'] if not formats and restricted_to_denmark: raise ExtractorError( 'Unfortunately, DR is not allowed to show this program outside Denmark.', expected=True) self._sort_formats(formats) if self._downloader.params.get('listsubtitles', False): self._list_available_subtitles(video_id, subtitles) return return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 'formats': formats, 'subtitles': self.extract_subtitles(video_id, subtitles), } from __future__ import unicode_literals from .subtitles import SubtitlesInfoExtractor from .common import ExtractorError from ..utils import parse_iso8601 class DRTVIE(SubtitlesInfoExtractor): _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)' _TEST = { 'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8', 'md5': '4a7e1dd65cdb2643500a3f753c942f25', 'info_dict': { 'id': 'partiets-mand-7-8', 'ext': 'mp4', 'title': 'Partiets mand (7:8)', 'description': 'md5:a684b90a8f9336cd4aab94b7647d7862', 'timestamp': 1403047940, 'upload_date': '20140617', 'duration': 1299.040, }, } def _real_extract(self, url): video_id = self._match_id(url) programcard = self._download_json( 'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON') data = programcard['Data'][0] title = data['Title'] description = data['Description'] timestamp = parse_iso8601(data['CreatedTime']) thumbnail = None duration = None restricted_to_denmark = False formats = [] subtitles = {} for asset in data['Assets']: if asset['Kind'] == 'Image': thumbnail = asset['Uri'] elif asset['Kind'] == 'VideoResource': duration = asset['DurationInMilliseconds'] / 1000.0 restricted_to_denmark = 
asset['RestrictedToDenmark'] for link in asset['Links']: target = link['Target'] uri = link['Uri'] formats.append({ 'url': uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43' if target == 'HDS' else uri, 'format_id': target, 'ext': link['FileFormat'], 'preference': -1 if target == 'HDS' else -2, }) subtitles_list = asset.get('SubtitlesList') if isinstance(subtitles_list, list): LANGS = { 'Danish': 'dk', } for subs in subtitles_list: lang = subs['Language'] subtitles[LANGS.get(lang, lang)] = subs['Uri'] if not formats and restricted_to_denmark: raise ExtractorError( 'Unfortunately, DR is not allowed to show this program outside Denmark.', expected=True) self._sort_formats(formats) if self._downloader.params.get('listsubtitles', False): self._list_available_subtitles(video_id, subtitles) return return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 'formats': formats, 'subtitles': self.extract_subtitles(video_id, subtitles), }
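# --- Editor's note (not in the original source): the two drtv.py variants in
# this record differ in exactly two places: one resolves the video id with the
# self._match_id() helper instead of re.match(self._VALID_URL, url), and one
# passes the full CreatedTime string to parse_iso8601() while the other first
# strips the trailing five characters with [:-5]. A hypothetical probe of that
# difference (the sample timestamp is invented):
#
#     from youtube_dl.utils import parse_iso8601
#     parse_iso8601('2014-06-17T22:12:20.000+02:00')        # full value
#     parse_iso8601('2014-06-17T22:12:20.000+02:00'[:-5])   # sliced variant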
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-33-fixed/youtube-dl/youtube_dl/extractor/drtv.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-33-buggy/youtube-dl/youtube_dl/extractor/drtv.py
youtube-dl-bug-17
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = 
list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return 
n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. 
It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return 
compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def 
bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
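# Assumed illustration: a raw header like 'Set-Cookie: name=café' would come out # as 'name=caf%C3%A9' under the (currently disabled) percent-encoding below.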
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) # Remove unrecognized timezones from ISO 8601 alike timestamps m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) if m: date_str = date_str[:-len(m.group('tz'))] for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def 
subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, are missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(compat_shlex_quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use.
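Illustrative round trip (assumed values): smuggle_url('http://example.com/v', {'a': 1}) returns 'http://example.com/v#__youtubedl_smuggle=%7B%22a%22%3A+1%7D', and calling unsmuggle_url() on that result yields ('http://example.com/v', {'a': 1}).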
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if isinstance(base, bytes): base = base.decode('utf-8') if not isinstance(base, compat_str) or not re.match( r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? 
(?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
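# Worked example (illustrative): with pagesize=10, getslice(25, 28) starts at # pagenum=2 (firstid=20), computes startv=25%10=5 and endv=((28-1)%10)+1=8, and # extends res with page_results[5:8], i.e. the global items 25..27.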
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def _multipart_encode_impl(data, boundary): content_type = 'multipart/form-data; boundary=%s' % boundary out = b'' for k, v in data.items(): out += b'--' + boundary.encode('ascii') + b'\r\n' if isinstance(k, compat_str): k = k.encode('utf-8') if isinstance(v, compat_str): v = v.encode('utf-8') # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578 # suggests sending 
UTF-8 directly. Firefox sends UTF-8, too content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n' if boundary.encode('ascii') in content: raise ValueError('Boundary overlaps with data') out += content out += b'--' + boundary.encode('ascii') + b'--\r\n' return out, content_type def multipart_encode(data, boundary=None): ''' Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578 ''' has_specified_boundary = boundary is not None while True: if boundary is None: boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff)) try: out, content_type = _multipart_encode_impl(data, boundary) break except ValueError: if has_specified_boundary: raise boundary = None return out, content_type def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): if not isinstance(getter, (list, tuple)): getter = [getter] for get in getter: try: v = get(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'''(?sx)^ (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+) (?:\s*&&\s*(?P=func_name))? \s*\(\s*(?P<callback_data>.*)\);? 
\s*?(?://[^\n]*)*$''', r'\g<callback_data>', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.split(';')[0].strip().lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', 'quicktime': 'mov', 'mp2t': 'ts', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} splited_codecs = list(filter(None, map( lambda str: str.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in splited_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr) if not vcodec and not acodec: if len(splited_codecs) == 2: # Neither entry was recognized above; with exactly two entries assume # the conventional order: video codec first, audio codec second return { 'vcodec': splited_codecs[0], 'acodec': splited_codecs[1], } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes.
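For example (illustrative inputs), b'  <html>' and the BOM-prefixed b'\xef\xbb\xbf<!DOCTYPE html>' are both detected, since any BOM is decoded away before the leading '<' is matched.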
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): LEGACY_NAMESPACES = ( ('http://www.w3.org/ns/ttml', [ 'http://www.w3.org/2004/11/ttaf1', 'http://www.w3.org/2006/04/ttaf1', 'http://www.w3.org/2006/10/ttaf1', ]), ('http://www.w3.org/ns/ttml#styling', [ 'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser(object): _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while 
True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 
'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 
'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': 
'84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': 
'49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. 
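
    Example (an illustrative sketch, not part of the original docstring):
    long_to_bytes(65537, blocksize=4) == b'\x00\x01\x00\x01', since 65537 is
    0x010001 (three bytes) and the result is front-padded to four bytes.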
""" # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = compat_struct_pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0 length = len(s) if length % 4: extra = (4 - length % 4) s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] return acc def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height 
= unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): return { year_field: str(random.randint(1950, 1995)), month_field: str(random.randint(1, 12)), day_field: str(random.randint(1, 31)), } #!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', 
'%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
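            # (On POSIX, os.rename() replaces the target atomically, so
            # readers never observe a half-written file; Windows has no
            # atomic replace here, hence the unlink below, which briefly
            # leaves no file at all.)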
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) 
</\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
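    # For example, 'eacute;' is resolved by the name2codepoint lookup above
    # and yields 'é'; names that exist only in the HTML5 table are handled
    # by the lookup just below.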
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
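        # (For illustration: the disabled workaround below would turn a
        # 'Set-Cookie: name=héllo' header into 'name=h%C3%A9llo' before the
        # cookie jar parses it.)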
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) # Remove unrecognized timezones from ISO 8601 alike timestamps m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) if m: date_str = date_str[:-len(m.group('tz'))] for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def 
subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(compat_shlex_quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
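
    Illustrative round trip (not part of the original docstring):
    smuggle_url('http://example.com/v', {'lang': 'en'}) returns the URL with
    a '#__youtubedl_smuggle=...' fragment appended, and unsmuggle_url() on
    the result yields ('http://example.com/v', {'lang': 'en'}) again.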
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#&]+/', url).group()


def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:https?:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'PUT'


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except ValueError:
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except ValueError:
        return default


def strip_or_none(v):
    return None if v is None else v.strip()


def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        m = re.match(
            r'''(?ix)(?:P?T)?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms)
    return duration
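
# Usage sketch (the sample strings are illustrative, not exhaustive of the
# formats parse_duration accepts):
def _example_parse_duration():
    assert parse_duration('1:02:03') == 3723       # clock-style
    assert parse_duration('PT1H2M3S') == 3723      # ISO-8601-style
    assert parse_duration('3 min') == 180          # free-form
    assert parse_duration('not a duration') is None
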
def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if youtube-dl is run in the background.
        # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())


class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize, use_cache=False):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            self._cache = {}

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = None
            if self._use_cache:
                page_results = self._cache.get(pagenum)
            if page_results is None:
                page_results = list(self._pagefunc(pagenum))
            if self._use_cache:
                self._cache[pagenum] = page_results

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res
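
# Usage sketch (the page size and page contents are illustrative
# assumptions): OnDemandPagedList only invokes the page function for pages
# the requested slice actually touches.
def _example_on_demand_paged_list():
    def fetch_page(pagenum):
        # Pretend each page holds three ids and the feed ends after page 2.
        if pagenum > 2:
            return []
        return ['id%d' % (pagenum * 3 + i) for i in range(3)]

    pl = OnDemandPagedList(fetch_page, 3, use_cache=True)
    assert pl.getslice(2, 5) == ['id2', 'id3', 'id4']
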
class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


def update_url_query(url, query):
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))


def update_Request(req, url=None, data=None, headers={}, query={}):
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req


def _multipart_encode_impl(data, boundary):
    content_type = 'multipart/form-data; boundary=%s' % boundary

    out = b''
    for k, v in data.items():
        out += b'--' + boundary.encode('ascii') + b'\r\n'
        if isinstance(k, compat_str):
            k = k.encode('utf-8')
        if isinstance(v, compat_str):
            v = v.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary.encode('ascii') in content:
            raise ValueError('Boundary overlaps with data')
        out += content

    out += b'--' + boundary.encode('ascii') + b'--\r\n'

    return out, content_type
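
# Usage sketch (the URL and parameters are illustrative assumptions):
def _example_query_helpers():
    url = update_url_query('http://example.com/path?a=1&b=2', {'b': '3'})
    # Existing keys are overridden; the rest of the URL is preserved.
    assert compat_parse_qs(compat_urlparse.urlparse(url).query) == {'a': ['1'], 'b': ['3']}
    # urlencode_postdata yields the bytes expected by urllib request bodies.
    assert urlencode_postdata({'user': 'x'}) == b'user=x'
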
def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type


def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            if key not in d or d[key] is None or skip_false_values and not d[key]:
                continue
            return d[key]
        return default
    return d.get(key_or_keys, default)


def try_get(src, getter, expected_type=None):
    if not isinstance(getter, (list, tuple)):
        getter = [getter]
    for get in getter:
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            pass
        else:
            if expected_type is None or isinstance(v, expected_type):
                return v


def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}


def parse_age_limit(s):
    if type(s) == int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    if s in US_RATINGS:
        return US_RATINGS[s]
    return TV_PARENTAL_GUIDELINES.get(s)


def strip_jsonp(code):
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)
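
# Usage sketch (the nested metadata dict is an illustrative assumption):
# try_get digs into nested structures without raising; dict_get tries keys
# in order and skips falsy values by default.
def _example_try_get_and_dict_get():
    meta = {'player': {'args': {'title': 'Some title'}}, 'alt_title': ''}
    assert try_get(meta, lambda x: x['player']['args']['title'], compat_str) == 'Some title'
    assert try_get(meta, lambda x: x['missing']['key']) is None
    # '' is falsy and thus skipped, so the lookup falls through to the default.
    assert dict_get(meta, ('alt_title', 'fulltitle'), default='n/a') == 'n/a'
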
def js_to_json(code):
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v == ',':
            return ""

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                return '"%d":' % i if v.endswith(':') else '%d' % i

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        [a-zA-Z_][.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str


def mimetype2ext(mt):
    if mt is None:
        return None

    ext = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
    }.get(res, res)
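
# Usage sketch (the object literal is an illustrative assumption): js_to_json
# turns relaxed JavaScript literals (single quotes, unquoted keys, comments,
# trailing commas, hex numbers) into strict JSON.
def _example_js_to_json():
    js = "{foo: 'bar', /* comment */ count: 0x10, list: [1, 2,],}"
    assert json.loads(js_to_json(js)) == {'foo': 'bar', 'count': 16, 'list': [1, 2]}
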
def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    splited_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in splited_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(splited_codecs) == 2:
            return {
                'vcodec': vcodec,
                'acodec': acodec,
            }
        elif len(splited_codecs) == 1:
            return {
                'vcodec': 'none',
                'acodec': vcodec,
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)
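
# Usage sketch (the codecs strings are illustrative assumptions):
# parse_codecs splits an RFC 6381 codecs attribute into video/audio fields.
def _example_parse_codecs():
    assert parse_codecs('avc1.64001f, mp4a.40.2') == {
        'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
    # A lone audio codec is reported as an audio-only stream.
    assert parse_codecs('opus') == {'vcodec': 'none', 'acodec': 'opus'}
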
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
def match_filter_func(filter_str):
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)


def dfxp2srt(dfxp_data):
    LEGACY_NAMESPACES = (
        ('http://www.w3.org/ns/ttml', [
            'http://www.w3.org/2004/11/ttaf1',
            'http://www.w3.org/2006/04/ttaf1',
            'http://www.w3.org/2006/10/ttaf1',
        ]),
        ('http://www.w3.org/ns/ttml#styling', [
            'http://www.w3.org/ns/ttml#style',
        ]),
    )

    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration'
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    styles = {}
    default_style = {}

    class TTMLPElementParser(object):
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    repeat = False
    while
True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) if param is None: return [] assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 
'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 
'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': 
'202.70.112.0/20', 'AT': '84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': 
'203.98.224.0/19', 'NU': '49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. 
""" # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = compat_struct_pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0 length = len(s) if length % 4: extra = (4 - length % 4) s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] return acc def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height 
= unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): return { year_field: str(random.randint(1950, 1995)), month_field: str(random.randint(1, 12)), day_field: str(random.randint(1, 31)), }
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
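    For instance, ContentTooShortError(1000, 2000) carries the message
    'Downloaded 1000 bytes, expected 2000 bytes'.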
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
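        # Sketch of what the commented-out workaround below would do, with a
        # hypothetical header for illustration: a response carrying
        #   Set-Cookie: name=värde
        # would be rewritten to
        #   Set-Cookie: name=v%C3%A4rde
        # before HTTPCookieProcessor parses it, avoiding the Python 2 crash.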
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
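        # Returning False here (as in the missing-fileno case above) makes
        # write_string() fall back to its regular encoded-bytes write path.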
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0, whole_low, whole_high,
                            f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use.
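    For example (hypothetical values), smuggle_url('http://a/b', {'k': 'v'})
    returns 'http://a/b#__youtubedl_smuggle=...' and unsmuggle_url() reverses
    the transformation.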
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if not isinstance(base, compat_str) or not re.match(r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
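    For example (hypothetical call), check_executable('ffmpeg', ['-version'])
    returns 'ffmpeg' when the binary can be spawned and False otherwise.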
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
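            # Illustrative example with hypothetical numbers: with a page
            # size of 50, a page yielding only 37 results must be the last
            # one, so the check below stops querying further pages.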
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.split(';')[0].strip().lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m': 'f4m', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', 'quicktime': 'mov', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} splited_codecs = list(filter(None, map( lambda str: str.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in splited_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr) if not vcodec and not acodec: if len(splited_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(splited_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
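    For example, is_html(b'\xef\xbb\xbf<html>') is truthy: the UTF-8 BOM is
    stripped before the leading '<' is checked.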
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 
'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 
'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United 
Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': '84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': 
'198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': '49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: 
            meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # youtube-dl's http/https handlers wrap the socket with SOCKS themselves
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)


# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
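# Illustrative round-trip sketch (comments only, not executed): leading zero
# bytes are stripped unless blocksize pads them back, so the pair is only a
# true inverse for positive integers:
#   >>> long_to_bytes(65537)
#   b'\x01\x00\x01'
#   >>> long_to_bytes(65537, blocksize=4)
#   b'\x00\x01\x00\x01'
#   >>> bytes_to_long(b'\x00\x01\x00\x01')
#   65537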
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted


def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    # PKCS#1 v1.5 requires the padding bytes to be non-zero: a zero octet
    # would prematurely terminate the padding string on decryption
    pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
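# Illustrative usage sketch (comments only, not executed):
# parse_m3u8_attributes splits an attribute list such as one from an
# EXT-X-STREAM-INF line, unquoting quoted values, and urshift emulates
# JavaScript's unsigned 32-bit right shift for negative operands:
#   >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401f"')
#   {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401f'}
#   >>> urshift(-1, 28)
#   15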
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []
    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']
    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''
    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []
        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels


def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'youtube-dl requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)] +
                       [encodeArgument(o) for o in opts] +
                       [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
" "Install either the python 'xattr' module, " "or the 'xattr' binary.") #!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) 
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
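# Illustrative sketch (comments only, not executed): the two lists resolve
# ambiguous all-numeric dates differently, as unified_strdate (defined below)
# shows:
#   >>> unified_strdate('02/03/2015', day_first=True)
#   '20150302'
#   >>> unified_strdate('02/03/2015', day_first=False)
#   '20150203'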
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist
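# Illustrative sketch (comments only, not executed): class matching is
# word-bounded inside the attribute value, so both elements here match:
#   >>> get_elements_by_class('foo', '<div class="foo bar">x</div><p class="foo">y</p>')
#   ['x', 'y']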
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    parser.feed(html_element)
    parser.close()
    return parser.attrs


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?'
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if isinstance(base, bytes): base = base.decode('utf-8') if not isinstance(base, compat_str) or not re.match( r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? 
(?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
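# (Worked example, for illustration: with a pagesize of 50 and startv == 0,
# a page that yields only 37 results cannot be followed by a further page,
# so startv + 37 < 50 triggers the early break below.)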
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.split(';')[0].strip().lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m': 'f4m', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', 'quicktime': 'mov', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} splited_codecs = list(filter(None, map( lambda str: str.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in splited_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr) if not vcodec and not acodec: if len(splited_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(splited_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 
'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 
'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United 
Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': '84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': 
'198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': '49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: 
meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. """ # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = compat_struct_pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0 length = len(s) if length % 4: extra = (4 - length % 4) s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] return acc def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. 
See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) 
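# Paeth predictor (PNG spec, filter type 4): estimate the byte as
# p = a + b - c, then pick whichever neighbour (a = left, b = above,
# c = above-left) lies closest to that estimate and add it to the byte.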
pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.")
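# ---------------------------------------------------------------------------
# Minimal usage sketch of a few helpers defined above. This block is an
# illustration added for exposition and is not part of upstream youtube-dl;
# it assumes the module is otherwise intact and runs only when the module is
# executed directly.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # smuggle_url()/unsmuggle_url() round-trip extra data through the URL
    # fragment as JSON; extractors use this to pass hints along.
    smuggled = smuggle_url('http://example.com/video', {'referer': 'http://example.com/'})
    url, data = unsmuggle_url(smuggled)
    assert url == 'http://example.com/video'
    assert data == {'referer': 'http://example.com/'}

    # parse_duration() accepts clock-style as well as ISO-8601-style input.
    assert parse_duration('1:05:30') == 3930
    assert parse_duration('PT1H5M30S') == 3930

    # parse_filesize() understands both SI and binary suffixes (see the
    # deliberately lax _UNIT_TABLE above).
    assert parse_filesize('1.5MiB') == int(1.5 * 1024 ** 2)

    # DateRange accepts the relative expressions handled by date_from_str().
    assert 'now' in DateRange('now-1week', 'now+1week')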
#!/usr/bin/env python # coding: utf-8 from __future__ import absolute_import, unicode_literals import collections import contextlib import copy import datetime import errno import fileinput import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import sys import time import tokenize import traceback import random from .compat import ( compat_basestring, compat_cookiejar, compat_get_terminal_size, compat_http_client, compat_kwargs, compat_numeric_types, compat_os_name, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, compat_urllib_request_DataHandler, ) from .utils import ( age_restricted, args_to_str, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, determine_protocol, DownloadError, encode_compat_str, encodeFilename, error_to_compat_str, expand_path, ExtractorError, format_bytes, formatSeconds, GeoRestrictedError, int_or_none, ISO3166Utils, locked_file, make_HTTPS_handler, MaxDownloadsReached, PagedList, parse_filesize, PerRequestProxyHandler, platform_name, PostProcessingError, preferredencoding, prepend_extension, register_socks_protocols, render_table, replace_extension, SameFileError, sanitize_filename, sanitize_path, sanitize_url, sanitized_Request, std_headers, subtitles_filename, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLCookieProcessor, YoutubeDLHandler, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM3u8PP, FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ if compat_os_name == 'nt': import ctypes class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. As, given a video URL, the downloader doesn't know how to extract all the needed information, task that InfoExtractors do, it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object handles it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL process the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. ap_mso: Adobe Pass multiple-system operator identifier. ap_username: Multiple-system operator account username. ap_password: Multiple-system operator account password. usenetrc: Use netrc for authentication instead. 
verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. playlistrandom: Download playlist items in random order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use geo_verification_proxy: URL of the proxy to use for IP address verification on geo-restricted sites. 
                       (Experimental)
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               youtube_dl/postprocessor/__init__.py for a list.
                       as well as any further keyword arguments for the
                       postprocessor.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    (Experimental) Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       youtube-dl servers for debugging.
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval:Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header (experimental)
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header (experimental)

    The following options determine which downloader is picked:
    external_downloader: Executable of the external downloader to call.
                       None or unset for standard (built-in) downloader.
    hls_prefer_native: Use the native HLS downloader if True; use
                       ffmpeg/avconv if False; if None, use the downloader
                       suggested by the extractor.

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see youtube_dl/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
    noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize, external_downloader_args, hls_use_mpegts.

    The following options are used by the post processors:
    prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are
                       available, otherwise prefer avconv.
    postprocessor_args: A list of additional command-line arguments for the
                        postprocessor.
    """

    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'upload_year', 'upload_month', 'upload_day',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
        'playlist_index',
    ))

    params = None
    _ies = []
    _pps = []
    _download_retcode = None
    _num_downloads = None
    _screen_file = None

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options."""
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(
                    '%s is deprecated. Use %s instead.' % (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits')
        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround.
Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # Unicode filesystem API will throw errors (#1474, #13027) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) register_socks_protocols() def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) if not isinstance(ie, type): self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractor_classes(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if compat_os_name == 'nt': if ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += encode_compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '%dx?' 
% template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id' or k.endswith('_id'))) template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v)) for k, v in template_dict.items() if v is not None and not isinstance(v, (list, tuple, dict))) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) # For fields playlist_index and autonumber convert all occurrences # of %(field)s to %(field)0Nd for backward compatibility field_size_compat_map = { 'playlist_index': len(str(template_dict['n_entries'])), 'autonumber': autonumber_size, } FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s' mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl) if mobj: outtmpl = re.sub( FIELD_SIZE_COMPAT_RE, r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')], outtmpl) # Missing numeric fields used together with integer presentation types # in format specification will break the argument substitution since # string 'NA' is returned for missing fields. We will patch output # template for missing fields to meet string presentation type. for numeric_field in self._NUMERIC_FIELDS: if numeric_field not in template_dict: # As of [1] format syntax is: # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting FORMAT_RE = r'''(?x) (?<!%) % \({0}\) # mapping key (?:[#0\-+ ]+)? # conversion flags (optional) (?:\d+)? # minimum field width (optional) (?:\.\d+)? # precision (optional) [hlL]? # length modifier (optional) [diouxXeEfFgGcrs%] # conversion type ''' outtmpl = re.sub( FORMAT_RE.format(numeric_field), r'%({0})s'.format(numeric_field), outtmpl) filename = expand_path(outtmpl % template_dict) # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = encodeFilename(filename, True).decode(preferredencoding()) return sanitize_path(filename) except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date') if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count') if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = 
self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue ie = self.get_info_extractor(ie.ie_key()) if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except GeoRestrictedError as e: msg = e.msg if e.countries: msg += '\nThis video is available in %s.' % ', '.join( map(ISO3166Utils.short2full, e.countries)) msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.' self.report_error(msg) break except ExtractorError as e: # An error we somewhat expected self.report_error(compat_str(e), e.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. 
""" result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): ie_result['url'] = sanitize_url(ie_result['url']) extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) # extract_info may return None when ignoreerrors is enabled and # extraction failed with an error, don't crash and return early # in this case if not info: return info force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url', 'ie_key'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) # Extracted info may not be a video result (i.e. # info.get('_type', 'video') != video) but rather an url or # url_transparent. In such cases outer metadata (from ie_result) # should be propagated to inner one (info). For this to happen # _type of info should be overridden with url_transparent. This # fixes issue from https://github.com/rg3/youtube-dl/pull/11163. if new_result.get('_type') == 'url': new_result['_type'] = 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type in ('playlist', 'multi_video'): # We process each entry in the playlist playlist = ie_result.get('title') or ie_result.get('id') self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend') # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items') playlistitems = None if playlistitems_str is not None: def iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( '[%s] playlist %s: Collected %d video ids (downloading %d of them)' % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = 
list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] if self.params.get('playlistrandom', False): random.shuffle(entries) x_forwarded_for = ie_result.get('__x_forwarded_for_ip') for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) # This __x_forwarded_for_ip thing is a bit ugly but requires # minimal changes if x_forwarded_for: entry['__x_forwarded_for_ip'] = x_forwarded_for extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results self.to_screen('[download] Finished downloading playlist: %s' % playlist) return ie_result elif result_type == 'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) $ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, '^=': lambda attr, value: attr.startswith(value), '$=': lambda attr, value: attr.endswith(value), '*=': lambda attr, value: value in attr, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? 
\s*(?P<value>[a-zA-Z0-9._-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _remove_unused_ops(tokens): # Remove operators that we don't use and join them with the surrounding strings # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9' ALLOWED_OPS = ('/', '+', ',', '(', ')') last_string, last_start, last_end, last_line = None, None, None, None for type, string, start, end, line in tokens: if type == tokenize.OP and string == '[': if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line # everything inside brackets will be handled by _parse_filter for type, string, start, end, line in tokens: yield type, string, start, end, line if type == tokenize.OP and string == ']': break elif type == tokenize.OP and string in ALLOWED_OPS: if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]: if not last_string: last_string = string last_start = start last_end = end else: last_string += string if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': if not current_selector: raise syntax_error('"," must follow a format selector', start) selectors.append(current_selector) current_selector = None elif string == '/': if not current_selector: raise syntax_error('"/" must follow a format selector', start) first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise 
syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) if not video_selector or not audio_selector: raise syntax_error('"+" must be between two format selectors', start) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(ctx): for f in fs: for format in f(ctx): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(ctx): for f in fs: picked_formats = list(f(ctx)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(ctx): formats = list(ctx['formats']) if not formats: return if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for extractors with incomplete formats (audio only (soundcloud) # or video only (imgur)) we will fallback to best/worst # {video,audio}-only format elif ctx['incomplete_formats']: yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return # Formats must be opposite (video+audio) if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none': self.report_error( 'Both formats %s and %s are video-only, you must specify "-f video+audio"' % (format_1, format_2)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), 
formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(ctx): for pair in itertools.product( video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(ctx): ctx_copy = copy.deepcopy(ctx) for _filter in filters: ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats'])) return selector_function(ctx_copy) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline))) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies if 'X-Forwarded-For' not in res: x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip') if x_forwarded_for_ip: res['X-Forwarded-For'] = x_forwarded_for_ip return res def _calc_cookies(self, info_dict): pr = sanitized_Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') def report_force_conversion(field, field_not, conversion): self.report_warning( '"%s" field is not %s - forcing %s conversion, there is an error in extractor' % (field, field_not, conversion)) def sanitize_string_field(info, string_field): field = info.get(string_field) if field is None or isinstance(field, compat_str): return report_force_conversion(string_field, 'a string', 'string') info[string_field] = compat_str(field) def sanitize_numeric_fields(info): for numeric_field in self._NUMERIC_FIELDS: field = info.get(numeric_field) if field is None or isinstance(field, compat_numeric_types): continue report_force_conversion(numeric_field, 'numeric', 'int') info[numeric_field] = int_or_none(field) sanitize_string_field(info_dict, 'id') sanitize_numeric_fields(info_dict) if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = 
info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference') if t.get('preference') is not None else -1, t.get('width') if t.get('width') is not None else -1, t.get('height') if t.get('height') is not None else -1, t.get('id') if t.get('id') is not None else '', t.get('url'))) for i, t in enumerate(thumbnails): t['url'] = sanitize_url(t['url']) if t.get('width') and t.get('height'): t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnail'] = sanitize_url(thumbnail) elif thumbnails: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass # Auto generate title fields corresponding to the *_number fields when missing # in order to always have clean titles. This is very common for TV series. for field in ('chapter', 'season', 'episode'): if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) subtitles = info_dict.get('subtitles') if subtitles: for _, subtitle in subtitles.items(): for subtitle_format in subtitle: if subtitle_format.get('url'): subtitle_format['url'] = sanitize_url(subtitle_format['url']) if subtitle_format.get('ext') is None: subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower() if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], subtitles, 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], subtitles, info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') def is_wellformed(f): url = f.get('url') valid_url = url and isinstance(url, compat_str) if not valid_url: self.report_warning( '"url" field is missing or empty - skipping format, ' 'there is an error in extractor') return valid_url # Filter out malformed formats for better extraction robustness formats = list(filter(is_wellformed, formats)) formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): sanitize_string_field(format, 'format_id') sanitize_numeric_fields(format) format['url'] = sanitize_url(format['url']) if format.get('format_id') is None: format['format_id'] = compat_str(i) else: # Sanitize format_id from characters used in format selector expression format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id']) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] 
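# A minimal sketch (not part of the original code) of what the grouping in
# formats_dict, together with the dedup pass just below, achieves; the format
# dicts are hypothetical stand-ins for extractor output:
#
#     formats = [{'format_id': 'hls'}, {'format_id': 'hls'}, {'format_id': 'http'}]
#     groups = {}
#     for f in formats:
#         groups.setdefault(f['format_id'], []).append(f)
#     for format_id, group in groups.items():
#         if len(group) > 1:
#             for i, f in enumerate(group):
#                 f['format_id'] = '%s-%d' % (format_id, i)
#     # [f['format_id'] for f in formats] -> ['hls-0', 'hls-1', 'http']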
formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if format.get('ext') is None: format['ext'] = determine_ext(format['url']).lower() # Automatically determine protocol if missing (useful for format # selection purposes) if format.get('protocol') is None: format['protocol'] = determine_protocol(format) # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # Remove private housekeeping stuff if '__x_forwarded_for_ip' in info_dict: del info_dict['__x_forwarded_for_ip'] # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' fields if the original info_dict list them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # which can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and not info_dict.get('is_live')): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) # While in format selection we may need to have an access to the original # format set in order to calculate some metrics or do some processing. # For now we need to be able to guess whether original formats provided # by extractor are incomplete or not (i.e. whether extractor provides only # video-only or audio-only formats) for proper formats selection for # extractors with such incomplete formats (see # https://github.com/rg3/youtube-dl/pull/5556). # Since formats may be filtered during format selection and may not match # the original formats the results may be incorrect. Thus original formats # or pre-calculated metrics should be passed to format selection routines # as well. # We will pass a context object containing all necessary additional data # instead of just formats. # This fixes incorrect format selection issue (see # https://github.com/rg3/youtube-dl/issues/10083). 
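# A minimal sketch (not part of the original code) of how the flag computed
# below behaves; the two format dicts are hypothetical extractor output for an
# audio-only site such as SoundCloud:
#
#     formats = [
#         {'format_id': 'mp3-128', 'vcodec': 'none', 'acodec': 'mp3'},
#         {'format_id': 'opus-160', 'vcodec': 'none', 'acodec': 'opus'},
#     ]
#     all_video_only = all(
#         f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
#     all_audio_only = all(
#         f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)
#     assert all_video_only or all_audio_only
#
# With incomplete_formats True, the SINGLE selector for 'best' (see
# build_format_selector above) falls back to the best available
# audio-only/video-only format instead of yielding nothing.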
incomplete_formats = ( # All formats are video-only or all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or # all formats are audio-only all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)) ctx = { 'formats': formats, 'incomplete_formats': incomplete_formats, } formats_to_download = list(format_selector(ctx)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
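# A minimal sketch (not part of the original code) of a 'match_filter'
# callable as consulted by _match_entry() below: return None to accept the
# video, or a string explaining why it should be skipped. The function name
# and the 60-second threshold are made up for illustration:
#
#     def skip_shorts(info_dict):
#         duration = info_dict.get('duration')
#         if duration is not None and duration < 60:
#             return '%s is shorter than 60s, skipping' % info_dict.get('id')
#         return None  # None means: do not skip
#
#     ydl = YoutubeDL({'match_filter': skip_shorts})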
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + error_to_compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None:
                    sub_data = sub_info['data']
                else:
                    try:
                        sub_data = ie._download_webpage(
                            sub_info['url'], info_dict['id'], note=False)
                    except ExtractorError as err:
                        self.report_warning('Unable to download subtitle for "%s": %s' %
                                            (sub_lang, error_to_compat_str(err.cause)))
                        continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        # Use newline='' to prevent conversion of newline characters
                        # See https://github.com/rg3/youtube-dl/issues/10268
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
                            subfile.write(sub_data)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return

        if self.params.get('writeinfojson', False):
            infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(self.filter_requested_info(info_dict), infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return

        self._write_thumbnails(info_dict, filename)

        if not self.params.get('skip_download', False):
            try:
                def dl(name, info):
                    fd = get_suitable_downloader(info, self.params)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)

                if info_dict.get('requested_formats') is not None:
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self)
                    if not merger.available:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                                            'formats but ffmpeg or avconv are not installed.'
                                            ' The formats won\'t be merged.')
                    else:
                        postprocessors = [merger]

                    def compatible_formats(formats):
                        # The first format carries the video stream, the
                        # second the audio stream
                        video, audio = formats
                        # Check extension
                        video_ext, audio_ext = video.get('ext'), audio.get('ext')
                        if video_ext and audio_ext:
                            COMPATIBLE_EXTS = (
                                ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
                                ('webm', ),
                            )
                            for exts in COMPATIBLE_EXTS:
                                if video_ext in exts and audio_ext in exts:
                                    return True
                        # TODO: Check acodec/vcodec
                        return False

                    filename_real_ext = os.path.splitext(filename)[1][1:]
                    filename_wo_ext = (
                        os.path.splitext(filename)[0]
                        if filename_real_ext == info_dict['ext']
                        else filename)
                    requested_formats = info_dict['requested_formats']
                    if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                        info_dict['ext'] = 'mkv'
                        self.report_warning(
                            'Requested formats are incompatible for merge and will be merged into mkv.')
                    # Ensure filename always has a correct extension for successful merge
                    filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
                    if os.path.exists(encodeFilename(filename)):
                        self.to_screen(
                            '[download] %s has already been downloaded and '
                            'merged' % filename)
                    else:
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            new_info.update(f)
                            fname = self.prepare_filename(new_info)
                            fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext'])
                            downloaded.append(fname)
                            partial_success = dl(fname, new_info)
                            success = success and partial_success
                        info_dict['__postprocessors'] = postprocessors
                        info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

            if success and filename != '-':
                # Fixup content
                fixup_policy = self.params.get('fixup')
                if fixup_policy is None:
                    fixup_policy = 'detect_or_warn'

                INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'

                stretched_ratio = info_dict.get('stretched_ratio')
                if stretched_ratio is not None and stretched_ratio != 1:
                    if fixup_policy == 'warn':
                        self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                            info_dict['id'], stretched_ratio))
                    elif fixup_policy == 'detect_or_warn':
                        stretched_pp = FFmpegFixupStretchedPP(self)
                        if stretched_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(stretched_pp)
                        else:
                            self.report_warning(
                                '%s: Non-uniform pixel ratio (%s). %s'
                                % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                if (info_dict.get('requested_formats') is None
                        and info_dict.get('container') == 'm4a_dash'):
                    if fixup_policy == 'warn':
                        self.report_warning(
                            '%s: writing DASH m4a. '
                            'Only some players support this container.'
                            % info_dict['id'])
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM4aPP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: writing DASH m4a. '
                                'Only some players support this container. 
%s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') if (info_dict.get('protocol') == 'm3u8_native' or info_dict.get('protocol') == 'm3u8' and self.params.get('hls_prefer_native')): if fixup_policy == 'warn': self.report_warning('%s: malformed AAC bitstream detected.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM3u8PP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: malformed AAC bitstream detected. %s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and outtmpl != '-' and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is 
None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '%dx?' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('language'): if res: res += ' ' res += '[%s] ' % fdict['language'] if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: if res: res += ', ' res += '%sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: self.to_screen('[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( 
['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ if isinstance(req, compat_basestring): req = sanitized_Request(req) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') if _LAZY_LOADER: self._write_string('[debug] Lazy loading extractors enabled' + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: opts_cookiefile = expand_path(opts_cookiefile) self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = YoutubeDLCookieProcessor(self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) data_handler = compat_urllib_request_DataHandler() # When passing our own FileHandler instance, build_opener won't add the # default FileHandler and allows us to disable the file protocol, which # can be used for malicious purposes (see # https://github.com/rg3/youtube-dl/issues/8227) file_handler = compat_urllib_request.FileHandler() def file_open(*args, **kwargs): raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons') file_handler.file_open = file_open opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' 
% (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(encodeFilename(thumb_filename), 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], error_to_compat_str(err))) #!/usr/bin/env python # coding: utf-8 from __future__ import absolute_import, unicode_literals import collections import contextlib import copy import datetime import errno import fileinput import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import string import sys import time import tokenize import traceback import random from .compat import ( compat_basestring, compat_cookiejar, compat_get_terminal_size, compat_http_client, compat_kwargs, compat_numeric_types, compat_os_name, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, compat_urllib_request_DataHandler, ) from .utils import ( age_restricted, args_to_str, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, determine_protocol, DownloadError, encode_compat_str, encodeFilename, error_to_compat_str, expand_path, ExtractorError, format_bytes, formatSeconds, GeoRestrictedError, int_or_none, ISO3166Utils, locked_file, make_HTTPS_handler, MaxDownloadsReached, PagedList, parse_filesize, PerRequestProxyHandler, platform_name, PostProcessingError, preferredencoding, prepend_extension, register_socks_protocols, render_table, replace_extension, SameFileError, sanitize_filename, sanitize_path, sanitize_url, sanitized_Request, std_headers, subtitles_filename, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLCookieProcessor, YoutubeDLHandler, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM3u8PP, FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ if compat_os_name == 'nt': import ctypes class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible for downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. Since, given a video URL, the downloader doesn't know how to extract all the needed information (a task that InfoExtractors do), it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object hands it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL processes the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use.
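    A minimal usage sketch (illustrative only; the URL is a placeholder
    and the options shown are just two of the parameters documented
    below):

        from youtube_dl import YoutubeDL

        ydl_opts = {
            'format': 'bestvideo+bestaudio/best',
            'outtmpl': '%(title)s.%(ext)s',
        }
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://example.com/watch/some-video'])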
The YoutubeDL also registers itself as the downloader in charge of the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. ap_mso: Adobe Pass multiple-system operator identifier. ap_username: Multiple-system operator account username. ap_password: Multiple-system operator account password. usenetrc: Use netrc for authentication instead. verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. playlistrandom: Download playlist items in random order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video metadata to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use geo_verification_proxy: URL of the proxy to use for IP address verification on geo-restricted sites. (Experimental) socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fribidi debug_printtraffic:Print out sent and received HTTP traffic include_ads: Download ads as well default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Do not resolve URLs, return the immediate result. Pass in 'in_playlist' to only show this behavior for playlist items. postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See youtube_dl/postprocessor/__init__.py for a list. as well as any further keyword arguments for the postprocessor. progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. If status is one of "downloading" or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. * eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. merge_output_format: Extension to use when merging formats. fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: (Experimental) Client-side IP address to bind to. call_home: Boolean, true iff we are allowed to contact the youtube-dl servers for debugging. sleep_interval: Number of seconds to sleep before each download when used alone or a lower bound of a range for randomized sleep before each download (minimum possible number of seconds to sleep) when used along with max_sleep_interval. max_sleep_interval:Upper bound of a range for randomized sleep before each download (maximum possible number of seconds to sleep). Must only be used along with sleep_interval. Actual sleep time will be a random float from range [sleep_interval; max_sleep_interval]. listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called with the info_dict of every video. If it returns a message, the video is ignored. If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental) geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for explicit geographic restriction bypassing via faking X-Forwarded-For HTTP header (experimental) The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv if True, otherwise use ffmpeg/avconv if False, otherwise use downloader suggested by extractor if None. The following parameters are not used by YoutubeDL itself, they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args, hls_use_mpegts. The following options are used by the post processors: prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, otherwise prefer avconv. postprocessor_args: A list of additional command-line arguments for the postprocessor. """ _NUMERIC_FIELDS = set(( 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx', 'timestamp', 'upload_year', 'upload_month', 'upload_day', 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', 'track_number', 'disc_number', 'release_year', 'playlist_index', )) params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = { # Default parameters 'nocheckcertificate': False, } self.params.update(params) self.cache = Cache(self) def check_deprecated(param, option, suggestion): if self.params.get(param) is not None: self.report_warning( '%s is deprecated. Use %s instead.' 
% (option, suggestion)) return True return False if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'): if self.params.get('geo_verification_proxy') is None: self.params['geo_verification_proxy'] = self.params['cn_verification_proxy'] check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits') check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"') check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"') if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == errno.ENOENT: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # Unicode filesystem API will throw errors (#1474, #13027) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) register_socks_protocols() def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) if not isinstance(ie, type): self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key; it will try to get one from the _ies list, and if there is no instance it will create a new one and add it to the extractor list.
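        A short sketch (assuming 'Youtube' is the ie_key of the YouTube
        extractor):

            ie = ydl.get_info_extractor('Youtube')  # created and cached
            assert ie is ydl.get_info_extractor('Youtube')  # same instance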
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractor_classes(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if compat_os_name == 'nt': if ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += encode_compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '%dx?' 
% template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id' or k.endswith('_id'))) template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v)) for k, v in template_dict.items() if v is not None and not isinstance(v, (list, tuple, dict))) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) # For fields playlist_index and autonumber convert all occurrences # of %(field)s to %(field)0Nd for backward compatibility field_size_compat_map = { 'playlist_index': len(str(template_dict['n_entries'])), 'autonumber': autonumber_size, } FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s' mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl) if mobj: outtmpl = re.sub( FIELD_SIZE_COMPAT_RE, r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')], outtmpl) # Missing numeric fields used together with integer presentation types # in format specification will break the argument substitution since # string 'NA' is returned for missing fields. We will patch output # template for missing fields to meet string presentation type. for numeric_field in self._NUMERIC_FIELDS: if numeric_field not in template_dict: # As of [1] format syntax is: # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting FORMAT_RE = r'''(?x) (?<!%) % \({0}\) # mapping key (?:[#0\-+ ]+)? # conversion flags (optional) (?:\d+)? # minimum field width (optional) (?:\.\d+)? # precision (optional) [hlL]? # length modifier (optional) [diouxXeEfFgGcrs%] # conversion type ''' outtmpl = re.sub( FORMAT_RE.format(numeric_field), r'%({0})s'.format(numeric_field), outtmpl) # expand_path translates '%%' into '%' and '$$' into '$' # correspondingly that is not what we want since we need to keep # '%%' intact for template dict substitution step. Working around # with boundary-alike separator hack. sep = ''.join([random.choice(string.ascii_letters) for _ in range(32)]) outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep)) # outtmpl should be expand_path'ed before template dict substitution # because meta fields may contain env variables we don't want to # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and # title "Hello $PATH", we don't want `$PATH` to be expanded. 
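            # Illustrative walk-through (hypothetical template, not part of the
            # original code): for outtmpl '50%% off - %(title)s.%(ext)s', the
            # literal '%%' was masked above as '%<sep>%', so expand_path() only
            # expands real environment variable references; the .replace(sep, '')
            # below then restores '%%', and the final '%' substitution still
            # produces a single literal percent sign in the file name.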
filename = expand_path(outtmpl).replace(sep, '') % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = encodeFilename(filename, True).decode(preferredencoding()) return sanitize_path(filename) except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date') if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count') if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue ie = self.get_info_extractor(ie.ie_key()) if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except GeoRestrictedError as e: msg = e.msg if e.countries: msg += '\nThis video is available in %s.' % ', '.join( map(ISO3166Utils.short2full, e.countries)) msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.' self.report_error(msg) break except ExtractorError as e: # An error we somewhat expected self.report_error(compat_str(e), e.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): ie_result['url'] = sanitize_url(ie_result['url']) extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) # extract_info may return None when ignoreerrors is enabled and # extraction failed with an error, don't crash and return early # in this case if not info: return info force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url', 'ie_key'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) # Extracted info may not be a video result (i.e. # info.get('_type', 'video') != video) but rather an url or # url_transparent. 
In such cases outer metadata (from ie_result) # should be propagated to inner one (info). For this to happen # _type of info should be overridden with url_transparent. This # fixes issue from https://github.com/rg3/youtube-dl/pull/11163. if new_result.get('_type') == 'url': new_result['_type'] = 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type in ('playlist', 'multi_video'): # We process each entry in the playlist playlist = ie_result.get('title') or ie_result.get('id') self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend') # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items') playlistitems = None if playlistitems_str is not None: def iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( '[%s] playlist %s: Collected %d video ids (downloading %d of them)' % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] if self.params.get('playlistrandom', False): random.shuffle(entries) x_forwarded_for = ie_result.get('__x_forwarded_for_ip') for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) # This __x_forwarded_for_ip thing is a bit ugly but requires # minimal changes if x_forwarded_for: entry['__x_forwarded_for_ip'] = x_forwarded_for extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results self.to_screen('[download] Finished downloading playlist: %s' % playlist) return ie_result elif result_type == 
'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) $ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, '^=': lambda attr, value: attr.startswith(value), '$=': lambda attr, value: attr.endswith(value), '*=': lambda attr, value: value in attr, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? 
\s*(?P<value>[a-zA-Z0-9._-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _remove_unused_ops(tokens): # Remove operators that we don't use and join them with the surrounding strings # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9' ALLOWED_OPS = ('/', '+', ',', '(', ')') last_string, last_start, last_end, last_line = None, None, None, None for type, string, start, end, line in tokens: if type == tokenize.OP and string == '[': if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line # everything inside brackets will be handled by _parse_filter for type, string, start, end, line in tokens: yield type, string, start, end, line if type == tokenize.OP and string == ']': break elif type == tokenize.OP and string in ALLOWED_OPS: if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]: if not last_string: last_string = string last_start = start last_end = end else: last_string += string if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': if not current_selector: raise syntax_error('"," must follow a format selector', start) selectors.append(current_selector) current_selector = None elif string == '/': if not current_selector: raise syntax_error('"/" must follow a format selector', start) first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise 
syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) if not video_selector or not audio_selector: raise syntax_error('"+" must be between two format selectors', start) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(ctx): for f in fs: for format in f(ctx): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(ctx): for f in fs: picked_formats = list(f(ctx)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(ctx): formats = list(ctx['formats']) if not formats: return if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for extractors with incomplete formats (audio only (soundcloud) # or video only (imgur)) we will fallback to best/worst # {video,audio}-only format elif ctx['incomplete_formats']: yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return # Formats must be opposite (video+audio) if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none': self.report_error( 'Both formats %s and %s are video-only, you must specify "-f video+audio"' % (format_1, format_2)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), 
formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(ctx): for pair in itertools.product( video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(ctx): ctx_copy = copy.deepcopy(ctx) for _filter in filters: ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats'])) return selector_function(ctx_copy) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline))) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies if 'X-Forwarded-For' not in res: x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip') if x_forwarded_for_ip: res['X-Forwarded-For'] = x_forwarded_for_ip return res def _calc_cookies(self, info_dict): pr = sanitized_Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') def report_force_conversion(field, field_not, conversion): self.report_warning( '"%s" field is not %s - forcing %s conversion, there is an error in extractor' % (field, field_not, conversion)) def sanitize_string_field(info, string_field): field = info.get(string_field) if field is None or isinstance(field, compat_str): return report_force_conversion(string_field, 'a string', 'string') info[string_field] = compat_str(field) def sanitize_numeric_fields(info): for numeric_field in self._NUMERIC_FIELDS: field = info.get(numeric_field) if field is None or isinstance(field, compat_numeric_types): continue report_force_conversion(numeric_field, 'numeric', 'int') info[numeric_field] = int_or_none(field) sanitize_string_field(info_dict, 'id') sanitize_numeric_fields(info_dict) if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = 
info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference') if t.get('preference') is not None else -1, t.get('width') if t.get('width') is not None else -1, t.get('height') if t.get('height') is not None else -1, t.get('id') if t.get('id') is not None else '', t.get('url'))) for i, t in enumerate(thumbnails): t['url'] = sanitize_url(t['url']) if t.get('width') and t.get('height'): t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnail'] = sanitize_url(thumbnail) elif thumbnails: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass # Auto generate title fields corresponding to the *_number fields when missing # in order to always have clean titles. This is very common for TV series. for field in ('chapter', 'season', 'episode'): if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) subtitles = info_dict.get('subtitles') if subtitles: for _, subtitle in subtitles.items(): for subtitle_format in subtitle: if subtitle_format.get('url'): subtitle_format['url'] = sanitize_url(subtitle_format['url']) if subtitle_format.get('ext') is None: subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower() if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], subtitles, 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], subtitles, info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') def is_wellformed(f): url = f.get('url') valid_url = url and isinstance(url, compat_str) if not valid_url: self.report_warning( '"url" field is missing or empty - skipping format, ' 'there is an error in extractor') return valid_url # Filter out malformed formats for better extraction robustness formats = list(filter(is_wellformed, formats)) formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): sanitize_string_field(format, 'format_id') sanitize_numeric_fields(format) format['url'] = sanitize_url(format['url']) if format.get('format_id') is None: format['format_id'] = compat_str(i) else: # Sanitize format_id from characters used in format selector expression format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id']) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] 
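# Collect every format that shares this (sanitized) format_id; any id with more than one entry is renamed to '<format_id>-<n>' by the disambiguation pass below.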
formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if format.get('ext') is None: format['ext'] = determine_ext(format['url']).lower() # Automatically determine protocol if missing (useful for format # selection purposes) if format.get('protocol') is None: format['protocol'] = determine_protocol(format) # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # Remove private housekeeping stuff if '__x_forwarded_for_ip' in info_dict: del info_dict['__x_forwarded_for_ip'] # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' fields if the original info_dict list them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # which can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and not info_dict.get('is_live')): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) # While in format selection we may need to have an access to the original # format set in order to calculate some metrics or do some processing. # For now we need to be able to guess whether original formats provided # by extractor are incomplete or not (i.e. whether extractor provides only # video-only or audio-only formats) for proper formats selection for # extractors with such incomplete formats (see # https://github.com/rg3/youtube-dl/pull/5556). # Since formats may be filtered during format selection and may not match # the original formats the results may be incorrect. Thus original formats # or pre-calculated metrics should be passed to format selection routines # as well. # We will pass a context object containing all necessary additional data # instead of just formats. # This fixes incorrect format selection issue (see # https://github.com/rg3/youtube-dl/issues/10083). 
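# Illustrative sketch (assumed values, not from a real extraction): for an audio-only extractor such as soundcloud the context built below would look like ctx = {'formats': [f1_audio, f2_audio], 'incomplete_formats': True}, which lets the 'best'/'worst' branches of the SINGLE selector above fall back to formats[-1]/formats[0].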
incomplete_formats = ( # All formats are video-only or all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or # all formats are audio-only all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)) ctx = { 'formats': formats, 'incomplete_formats': incomplete_formats, } formats_to_download = list(format_selector(ctx)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
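# If the extractor supplied no human-readable 'format' label, fall back to the extension so later status messages still identify the download.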
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + error_to_compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None: sub_data = sub_info['data'] else: try: sub_data = ie._download_webpage( sub_info['url'], info_dict['id'], note=False) except ExtractorError as err: self.report_warning('Unable to download subtitle for "%s": %s' % (sub_lang, error_to_compat_str(err.cause))) continue try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format)) else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) # Use newline='' to prevent conversion of newline characters # See https://github.com/rg3/youtube-dl/issues/10268 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile: subfile.write(sub_data) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return if self.params.get('writeinfojson', False): infofn = replace_extension(filename, 'info.json', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): self.to_screen('[info] Video description metadata is already present') else: self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(self.filter_requested_info(info_dict), infofn) except (OSError, IOError): self.report_error('Cannot write metadata to JSON file ' + infofn) return self._write_thumbnails(info_dict, filename) if not self.params.get('skip_download', False): try: def dl(name, info): fd = get_suitable_downloader(info, self.params)(self, self.params) for ph in self._progress_hooks: fd.add_progress_hook(ph) if self.params.get('verbose'): self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) return fd.download(name, info) if info_dict.get('requested_formats') is not None: downloaded = [] success = True merger = FFmpegMergerPP(self) if not merger.available: postprocessors = [] self.report_warning('You have requested multiple ' 'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.') else: postprocessors = [merger] def compatible_formats(formats): video, audio = formats # Check extension video_ext, audio_ext = video.get('ext'), audio.get('ext') if video_ext and audio_ext: COMPATIBLE_EXTS = ( ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'), ('webm',) ) for exts in COMPATIBLE_EXTS: if video_ext in exts and audio_ext in exts: return True # TODO: Check acodec/vcodec return False filename_real_ext = os.path.splitext(filename)[1][1:] filename_wo_ext = ( os.path.splitext(filename)[0] if filename_real_ext == info_dict['ext'] else filename) requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') # Ensure filename always has a correct extension for successful merge filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' 'merged' % filename) else: for f in requested_formats: new_info = dict(info_dict) new_info.update(f) fname = self.prepare_filename(new_info) fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext']) downloaded.append(fname) partial_success = dl(fname, new_info) success = success and partial_success info_dict['__postprocessors'] = postprocessors info_dict['__files_to_merge'] = downloaded else: # Just a single file success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_error('unable to download video data: %s' % error_to_compat_str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success and filename != '-': # Fixup content fixup_policy = self.params.get('fixup') if fixup_policy is None: fixup_policy = 'detect_or_warn' INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.' stretched_ratio = info_dict.get('stretched_ratio') if stretched_ratio is not None and stretched_ratio != 1: if fixup_policy == 'warn': self.report_warning('%s: Non-uniform pixel ratio (%s)' % ( info_dict['id'], stretched_ratio)) elif fixup_policy == 'detect_or_warn': stretched_pp = FFmpegFixupStretchedPP(self) if stretched_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(stretched_pp) else: self.report_warning( '%s: Non-uniform pixel ratio (%s). %s' % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') if (info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash'): if fixup_policy == 'warn': self.report_warning( '%s: writing DASH m4a. ' 'Only some players support this container.' % info_dict['id']) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM4aPP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: writing DASH m4a. ' 'Only some players support this container.
%s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') if (info_dict.get('protocol') == 'm3u8_native' or info_dict.get('protocol') == 'm3u8' and self.params.get('hls_prefer_native')): if fixup_policy == 'warn': self.report_warning('%s: malformed AAC bitstream detected.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM3u8PP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: malformed AAC bitstream detected. %s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and outtmpl != '-' and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is 
None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '%dx?' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('language'): if res: res += ' ' res += '[%s] ' % fdict['language'] if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: if res: res += ', ' res += '%sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: self.to_screen('[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( 
['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ if isinstance(req, compat_basestring): req = sanitized_Request(req) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') if _LAZY_LOADER: self._write_string('[debug] Lazy loading extractors enabled' + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: opts_cookiefile = expand_path(opts_cookiefile) self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = YoutubeDLCookieProcessor(self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) data_handler = compat_urllib_request_DataHandler() # When passing our own FileHandler instance, build_opener won't add the # default FileHandler and allows us to disable the file protocol, which # can be used for malicious purposes (see # https://github.com/rg3/youtube-dl/issues/8227) file_handler = compat_urllib_request.FileHandler() def file_open(*args, **kwargs): raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons') file_handler.file_open = file_open opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' 
% (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(encodeFilename(thumb_filename), 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], error_to_compat_str(err)))
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-19-fixed/youtube-dl/youtube_dl/YoutubeDL.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-19-buggy/youtube-dl/youtube_dl/YoutubeDL.py
youtube-dl-bug-37
#!/usr/bin/env python # -*- coding: utf-8 -*- import calendar import contextlib import ctypes import datetime import email.utils import errno import getpass import gzip import itertools import io import json import locale import math import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import traceback import xml.etree.ElementTree import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken def _unquote(string, encoding='utf-8', errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. 
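# Worked example (assuming utf-8, Python 2 branch): _unquote('a%20b') yields u'a b': the pending pct_sequence b' ' is decoded at the flush below and joined with the non-encoded rest 'b'.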
string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = _unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = _unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr try: from xml.etree.ElementTree import ParseError as compat_xml_parse_error except ImportError: # Python 2.6 from xml.parsers.expat import ExpatError as compat_xml_parse_error def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) # In Python 2.x, json.dump expects a bytestream. 
# In Python 3.x, it writes to a character stream if sys.version_info < (3,0): def write_json_file(obj, fn): with open(fn, 'wb') as f: json.dump(obj, f) else: def write_json_file(obj, fn): with open(fn, 'w', encoding='utf-8') as f: json.dump(obj, f) if sys.version_info >= (2,7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def htmlentity_transform(matchobj): """Transforms an HTML entity to a character. This function receives a match object and is intended to be used with the re.sub() function. """ entity = matchobj.group(1) # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(u'(?u)#(x?\\d+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith(u'x'): base = 16 numstr = u'0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return (u'&%s;' % entity) compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class BaseHTMLParser(compat_html_parser.HTMLParser): def __init__(self): compat_html_parser.HTMLParser.__init__(self) self.html = None def loads(self, html): self.html = html self.feed(html) self.close() class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute self.value = value self.result = None self.started = False self.depth = {} self.watch_startpos = False self.error_count = 0 BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1 self.goahead(1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value: self.result = [tag] self.started = True self.watch_startpos = True if self.started: if tag not in self.depth: self.depth[tag] = 0 self.depth[tag] += 1 def handle_endtag(self, tag): if self.started: if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0: self.started = False self.result.append(self.getpos()) def find_startpos(self, x): """Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id""" if self.watch_startpos: self.watch_startpos = False self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos def get_result(self): if self.result is None: return None if len(self.result) != 3:
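# self.result should hold [tag, startpos, endpos]; anything shorter means the matching end tag was never seen.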
return None lines = self.html.split('\n') lines = lines[self.result[1][0]-1:self.result[2][0]] lines[0] = lines[0][self.result[1][1]:] if len(lines) == 1: lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] lines[-1] = lines[-1][:self.result[2][1]] return '\n'.join(lines).strip() # Hack for https://github.com/rg3/youtube-dl/issues/662 if sys.version_info < (2, 7, 3): AttrParser.parse_endtag = (lambda self, i: i + len("</scr'+'ipt>") if self.rawdata[i:].startswith("</scr'+'ipt>") else compat_html_parser.HTMLParser.parse_endtag(self, i)) def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" parser = AttrParser(attribute, value) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() class MetaParser(BaseHTMLParser): """ Modified HTMLParser that isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars # (unpack the generator so os.path.join receives each path part as a separate argument) alt_filename = os.path.join(*( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) )) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def unescapeHTML(s): if s is None: return None assert type(s) == compat_str result = re.sub(r'(?u)&(.+?);', htmlentity_transform, s) return result def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate, **kwargs): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3(**kwargs) else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv3) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None): """ tb, if given, is the original traceback (so that it can be printed out). 
If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h,v in std_headers.items(): if h in req.headers: del req.headers[h] req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def parse_iso8601(date_str): """ Return a UNIX timestamp from the given date """ if date_str is None: return None m = re.search( r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S') - timezone return calendar.timegm(dt.timetuple()) def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None #Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d.%m.%Y', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext=u'unknown_video'): guess = 
url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now' or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s", the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def write_string(s, out=None): if out is None: out = sys.stderr assert type(s) == compat_str if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr s = s.encode(preferredencoding(), 'ignore') try: out.write(s) except UnicodeEncodeError: # In Windows shells, this can fail even when the codec is just charmap!?
# See https://wiki.python.org/moin/PrintFails#Issue if sys.platform == 'win32' and hasattr(out, 'encoding'): s = s.encode(out.encoding, 'ignore').decode(out.encoding) out.write(s) else: raise out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) def get_cachedir(params={}): cache_root = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.lockf(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but include the latest evaluated element (the first element so that Not pred(e)) """ for e in seq: 
yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url, default=None): if not '#__youtubedl_smuggle' in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def str_to_int(int_str): int_str = re.sub(r'[,\.]', u'', int_str) return int(int_str) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', u'&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip(u'/').split(u'/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None): return default if v is None else (int(v) // scale) def float_or_none(v, scale=1, default=None): return default if v is None else (float(v) / scale) def parse_duration(s): if s is None: return None m = re.match( r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s) if not m: return None res = int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return u'{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe class PagedList(object): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def __len__(self): # This is only useful for tests return len(self.getslice()) def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res def uppercase_escape(s): return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: m.group(0).decode('unicode-escape'), s) try: struct.pack(u'!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = u'\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} return xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) if sys.version_info < (3, 0) and sys.platform == 'win32': def compat_getpass(prompt, *args, **kwargs): if isinstance(prompt, compat_str): prompt = prompt.encode(preferredencoding()) return getpass.getpass(prompt, *args, **kwargs) else: compat_getpass = getpass.getpass US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def strip_jsonp(code): return re.sub(r'(?s)^[a-zA-Z_]+\s*\(\s*(.*)\);\s*?\s*$', r'\1', code) #!/usr/bin/env python # -*- coding: utf-8 -*- import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import getpass import gzip import itertools import io import json import locale import math import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import traceback import 
xml.etree.ElementTree import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken def _unquote(string, encoding='utf-8', errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. 
string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = _unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = _unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr try: from xml.etree.ElementTree import ParseError as compat_xml_parse_error except ImportError: # Python 2.6 from xml.parsers.expat import ExpatError as compat_xml_parse_error def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) # In Python 2.x, json.dump expects a bytestream. 
# In Python 3.x, it writes to a character stream if sys.version_info < (3,0): def write_json_file(obj, fn): with open(fn, 'wb') as f: json.dump(obj, f) else: def write_json_file(obj, fn): with open(fn, 'w', encoding='utf-8') as f: json.dump(obj, f) if sys.version_info >= (2,7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def htmlentity_transform(matchobj): """Transforms an HTML entity to a character. This function receives a match object and is intended to be used with the re.sub() function. """ entity = matchobj.group(1) # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(u'(?u)#(x?\\d+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith(u'x'): base = 16 numstr = u'0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return (u'&%s;' % entity) compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class BaseHTMLParser(compat_html_parser.HTMLParser): def __init(self): compat_html_parser.HTMLParser.__init__(self) self.html = None def loads(self, html): self.html = html self.feed(html) self.close() class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute self.value = value self.result = None self.started = False self.depth = {} self.watch_startpos = False self.error_count = 0 BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1 self.goahead(1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value: self.result = [tag] self.started = True self.watch_startpos = True if self.started: if not tag in self.depth: self.depth[tag] = 0 self.depth[tag] += 1 def handle_endtag(self, tag): if self.started: if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0: self.started = False self.result.append(self.getpos()) def find_startpos(self, x): """Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id""" if self.watch_startpos: self.watch_startpos = False self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos def get_result(self): if self.result is None: return None if len(self.result) != 3: 
return None lines = self.html.split('\n') lines = lines[self.result[1][0]-1:self.result[2][0]] lines[0] = lines[0][self.result[1][1]:] if len(lines) == 1: lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] lines[-1] = lines[-1][:self.result[2][1]] return '\n'.join(lines).strip() # Hack for https://github.com/rg3/youtube-dl/issues/662 if sys.version_info < (2, 7, 3): AttrParser.parse_endtag = (lambda self, i: i + len("</scr'+'ipt>") if self.rawdata[i:].startswith("</scr'+'ipt>") else compat_html_parser.HTMLParser.parse_endtag(self, i)) def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" parser = AttrParser(attribute, value) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() class MetaParser(BaseHTMLParser): """ Modified HTMLParser that isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. 
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def unescapeHTML(s): if s is None: return None assert type(s) == compat_str result = re.sub(r'(?u)&(.+?);', htmlentity_transform, s) return result def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate, **kwargs): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3(**kwargs) else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv3) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None): """ tb, if given, is the original traceback (so that it can be printed out). 
If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h,v in std_headers.items(): if h in req.headers: del req.headers[h] req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def parse_iso8601(date_str): """ Return a UNIX timestamp from the given date """ if date_str is None: return None m = re.search( r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S') - timezone return calendar.timegm(dt.timetuple()) def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None #Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d.%m.%Y', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext=u'unknown_video'): guess = 
url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now'or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') #A bad aproximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day,day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def write_string(s, out=None): if out is None: out = sys.stderr assert type(s) == compat_str if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr s = s.encode(preferredencoding(), 'ignore') try: out.write(s) except UnicodeEncodeError: # In Windows shells, this can fail even when the codec is just charmap!? 
# See https://wiki.python.org/moin/PrintFails#Issue if sys.platform == 'win32' and hasattr(out, 'encoding'): s = s.encode(out.encoding, 'ignore').decode(out.encoding) out.write(s) else: raise out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) def get_cachedir(params={}): cache_root = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.lockf(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but include the latest evaluated element (the first element so that Not pred(e)) """ for e in seq: 
yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url, default=None): if not '#__youtubedl_smuggle' in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def str_to_int(int_str): int_str = re.sub(r'[,\.]', u'', int_str) return int(int_str) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', u'&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip(u'/').split(u'/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None): return default if v is None else (int(v) // scale) def float_or_none(v, scale=1, default=None): return default if v is None else (float(v) / scale) def parse_duration(s): if s is None: return None m = re.match( r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s) if not m: return None res = int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return u'{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe class PagedList(object): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def __len__(self): # This is only useful for tests return len(self.getslice()) def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) try: struct.pack(u'!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = u'\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} return xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) if sys.version_info < (3, 0) and sys.platform == 'win32': def compat_getpass(prompt, *args, **kwargs): if isinstance(prompt, compat_str): prompt = prompt.encode(preferredencoding()) return getpass.getpass(prompt, *args, **kwargs) else: compat_getpass = getpass.getpass US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def strip_jsonp(code): return re.sub(r'(?s)^[a-zA-Z_]+\s*\(\s*(.*)\);\s*?\s*$', r'\1', code)
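
# Illustrative doctest-style sketch of a few of the pure helpers defined
# above. The expected values are hand-derived from the function
# definitions in this file rather than captured from a live session, and
# the URLs and filenames are made-up examples.
#
#   >>> print(determine_ext('http://example.com/video.mp4?autoplay=1'))
#   mp4
#   >>> print(hyphenate_date('20141205'))
#   2014-12-05
#   >>> parse_duration('1:23:45')  # 1 hour, 23 minutes, 45 seconds
#   5025
#   >>> print(format_bytes(1536))
#   1.50KiB
#   >>> print(prepend_extension('video.mp4', 'temp'))
#   video.temp.mp4
#   >>> month_by_name('March')
#   3
#   >>> print(strip_jsonp('callback({"status": "ok"});'))
#   {"status": "ok"}
#   >>> url, data = unsmuggle_url(
#   ...     smuggle_url('http://example.com/v', {'referer': 'http://example.com/'}))
#   >>> print(url)
#   http://example.com/v
#   >>> print(data['referer'])
#   http://example.com/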
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-37-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-37-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-10
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import itertools import io import json import locale import math import operator import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_basestring, compat_chr, compat_getenv, compat_html_entities, compat_http_client, compat_parse_qs, compat_socket_create_connection, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlparse, compat_urllib_request, compat_urlparse, shlex_quote, ) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**args) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z-]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + "[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! 
if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_text(node, xpath, name=None, fatal=False): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') n = node.find(xpath) if n is None or n.text is None: if fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n.text def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. 
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x?[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return ('&%s;' % entity) def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg += '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. 
""" def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname return self.do_open(functools.partial( _create_http_connection, self, self._https_conn_class, True), req, **kwargs) def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None if timezone is None: m = re.search( r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) 
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    if timezone is None:
        m = re.search(
            r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
            date_str)
        if not m:
            timezone = datetime.timedelta()
        else:
            date_str = date_str[:-len(m.group(0))]
            if not m.group('sign'):
                timezone = datetime.timedelta()
            else:
                sign = 1 if m.group('sign') == '+' else -1
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
    date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
    dt = datetime.datetime.strptime(date_str, date_format) - timezone
    return calendar.timegm(dt.timetuple())


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # %z (UTC offset) is only supported in python>=3.2
    date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    format_expressions = [
        '%d %B %Y',
        '%d %b %Y',
        '%B %d %Y',
        '%b %d %Y',
        '%b %dst %Y %I:%M%p',
        '%b %dnd %Y %I:%M%p',
        '%b %dth %Y %I:%M%p',
        '%Y %m %d',
        '%Y-%m-%d',
        '%Y/%m/%d',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%d.%m.%Y %H:%M',
        '%d.%m.%Y %H.%M',
        '%Y-%m-%dT%H:%M:%SZ',
        '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%dT%H:%M:%S.%f0Z',
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M',
    ]
    if day_first:
        format_expressions.extend([
            '%d.%m.%Y',
            '%d/%m/%Y',
            '%d/%m/%y',
            '%d/%m/%Y %H:%M:%S',
        ])
    else:
        format_expressions.extend([
            '%m.%d.%Y',
            '%m/%d/%Y',
            '%m/%d/%y',
            '%m/%d/%Y %H:%M:%S',
        ])
    for expression in format_expressions:
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    return upload_date


def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    else:
        return default_ext


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A rough approximation, since month and year lengths vary:
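        # e.g. 'now-3months' resolves to today - 90 days and 'now+1year'
        # to today + 365 days (illustrative values).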
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, "%Y%m%d").date()


def hyphenate_date(date_str):
    """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s", the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b"GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b"GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() 
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    import fcntl

    def _lock_file(f, exclusive):
        fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

    def _unlock_file(f):
        fcntl.flock(f, fcntl.LOCK_UN)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def takewhile_inclusive(pred, seq):
    """ Like itertools.takewhile, but include the first element for which
    pred(e) is false (i.e. the element that stops the iteration) """
    for e in seq:
        yield e
        if not pred(e):
            return
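# Illustrative example (not part of the original code):
#   list(takewhile_inclusive(lambda x: x < 3, [1, 2, 5, 1])) == [1, 2, 5]
# Unlike itertools.takewhile, the element that fails the predicate (5) is
# still yielded, and iteration stops immediately afterwards.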
""" sdata = compat_urllib_parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and inofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = _UNIT_TABLE[m.group('unit')] return int(float(num_str) * mult) def get_term_width(): columns = compat_getenv('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None return default if v is None else (int(v) * invscale // scale) def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed 
version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): return default if v is None else (float(v) * invscale / scale) def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() m = re.match( r'''(?ix)(?:P?T)? (?: (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*| (?P<only_hours>[0-9.]+)\s*(?:hours?)| (?: (?: (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)? (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s* )? (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* )? (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)? )$''', s) if not m: return None res = 0 if m.group('only_mins'): return float_or_none(m.group('only_mins'), invscale=60) if m.group('only_hours'): return float_or_none(m.group('only_hours'), invscale=60 * 60) if m.group('secs'): res += int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('days'): res += int(m.group('days')) * 24 * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return '{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [exe] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
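            # e.g. with a _pagesize of 50, a page that yields only 37
            # results must be the final page (illustrative numbers).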
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack('!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') try: etree_iter = xml.etree.ElementTree.Element.iter except AttributeError: # Python <=2.6 etree_iter = lambda n: n.findall('.//*') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) # Fix up XML parser in Python 2.x if sys.version_info < (3, 0): for n in etree_iter(tree): if n.text is not None: if not isinstance(n.text, compat_str): n.text = n.text.decode('utf-8') return tree US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def parse_age_limit(s): if s is None: return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) return int(m.group('age')) if m else US_RATINGS.get(s, None) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v 
in ('true', 'false', 'null'): return v if v.startswith('"'): return v if v.startswith("'"): v = v[1:-1] v = re.sub(r"\\\\|\\'|\"", lambda m: { '\\\\': '\\\\', "\\'": "'", '"': '\\"', }[m.group(0)], v) return '"%s"' % v res = re.sub(r'''(?x) "(?:[^"\\]*(?:\\\\|\\")?)*"| '(?:[^'\\]*(?:\\\\|\\')?)*'| [a-zA-Z_][.a-zA-Z_0-9]* ''', fix_kv, code) res = re.sub(r',(\s*\])', lambda m: m.group(1), res) return res def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(shlex_quote(a) for a in args) def urlhandle_detect_ext(url_handle): try: url_handle.headers getheader = lambda h: url_handle.headers[h] except AttributeError: # Python < 3 getheader = url_handle.info().getheader cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return getheader('Content-Type').split("/")[1] def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
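    A leading byte-order mark is honoured: the matching BOM is stripped and
    the remainder decoded with the corresponding encoding before testing for
    a leading '<' (optionally preceded by whitespace), so UTF-16 and UTF-32
    documents are recognized as well.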
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import itertools import io import json import locale import math import operator import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_basestring, compat_chr, compat_getenv, compat_html_entities, compat_http_client, compat_parse_qs, compat_socket_create_connection, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlparse, compat_urllib_request, compat_urlparse, shlex_quote, ) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**args) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z-]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + "[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! 
if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_text(node, xpath, name=None, fatal=False): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') n = node.find(xpath) if n is None or n.text is None: if fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n.text def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. 
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x?[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return ('&%s;' % entity) def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg += '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. 
""" def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname return self.do_open(functools.partial( _create_http_connection, self, self._https_conn_class, True), req, **kwargs) def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None if timezone is None: m = re.search( r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) 
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad aproximation? 
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b"GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b"GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() 
overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but also include the last evaluated element, i.e. the first element e for which pred(e) is false """ for e in seq: yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use.
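    Illustrative example (values made up): smuggle_url('http://example.com/v', {'a': 1})
    yields 'http://example.com/v#__youtubedl_smuggle=...' with the dict JSON-encoded
    and percent-escaped; unsmuggle_url() reverses the transformation.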
""" sdata = compat_urllib_parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and inofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = _UNIT_TABLE[m.group('unit')] return int(float(num_str) * mult) def get_term_width(): columns = compat_getenv('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None return default if v is None else (int(v) * invscale // scale) def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed 
version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): return default if v is None else (float(v) * invscale / scale) def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() m = re.match( r'''(?ix)(?:P?T)? (?: (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*| (?P<only_hours>[0-9.]+)\s*(?:hours?)| (?: (?: (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)? (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s* )? (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* )? (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)? )$''', s) if not m: return None res = 0 if m.group('only_mins'): return float_or_none(m.group('only_mins'), invscale=60) if m.group('only_hours'): return float_or_none(m.group('only_hours'), invscale=60 * 60) if m.group('secs'): res += int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('days'): res += int(m.group('days')) * 24 * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return '{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [exe] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack('!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') try: etree_iter = xml.etree.ElementTree.Element.iter except AttributeError: # Python <=2.6 etree_iter = lambda n: n.findall('.//*') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) # Fix up XML parser in Python 2.x if sys.version_info < (3, 0): for n in etree_iter(tree): if n.text is not None: if not isinstance(n.text, compat_str): n.text = n.text.decode('utf-8') return tree US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def parse_age_limit(s): if s is None: return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) return int(m.group('age')) if m else US_RATINGS.get(s, None) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v 
in ('true', 'false', 'null'): return v if v.startswith('"'): return v if v.startswith("'"): v = v[1:-1] v = re.sub(r"\\\\|\\'|\"", lambda m: { '\\\\': '\\\\', "\\'": "'", '"': '\\"', }[m.group(0)], v) return '"%s"' % v res = re.sub(r'''(?x) "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'| [a-zA-Z_][.a-zA-Z_0-9]* ''', fix_kv, code) res = re.sub(r',(\s*\])', lambda m: m.group(1), res) return res def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(shlex_quote(a) for a in args) def urlhandle_detect_ext(url_handle): try: url_handle.headers getheader = lambda h: url_handle.headers[h] except AttributeError: # Python < 3 getheader = url_handle.info().getheader cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return getheader('Content-Type').split("/")[1] def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
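    A BOM, if present, is honoured first so that UTF-16/UTF-32 documents are
    decoded with the right codec before looking for a leading '<'.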
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-10-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-10-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-23
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', 
'%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, 
default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): return get_element_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_element_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), value), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() parser.feed(html_element) parser.close() return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). 
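    On Windows, characters that are invalid in paths are replaced via
    sanitize_path() before the open is retried.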
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(Exception): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(Exception): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
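        # (Note, added: the percent-encoding workaround that follows is kept
        # commented out in this revision; http_response currently just delegates
        # to the parent class.)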
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
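    Unlike the earlier revision of this helper, this version first unsmuggles
    any data already present in url and merges it in, with the pre-existing
    values taking precedence over the new ones.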
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return title_bytes = title.encode('utf-8') buf = 
ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if not isinstance(base, compat_str) or not re.match(r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
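Editor's illustrative example (hedged; 'ffmpeg' is just a stand-in binary):
    check_executable('ffmpeg', ['-version'])
returns 'ffmpeg' when the binary can be spawned, and False otherwise.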
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if the current page is not "full", i.e. does # not contain page_size videos, then we can assume that this page # is the last one - there are no more ids on further pages, # so there is no need to query again.
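# --- Editor's illustrative sketch (not part of the original module;
# --- fetch_page is a hypothetical page producer) ---
# def fetch_page(pagenum):
#     return ['video%d' % n for n in range(pagenum * 50, (pagenum + 1) * 50)]
# pl = OnDemandPagedList(fetch_page, 50, use_cache=True)
# pl.getslice(0, 10)  # fetches page 0 only and returns the first ten ids
# ----------------------------------------------------------------------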
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) INTEGER_TABLE = ( (r'^(0[xX][0-9a-fA-F]+)\s*:?$', 16), (r'^(0+[0-7]+)\s*:?$', 8), ) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| /\*.*?\*/|,(?=\s*[\]}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?| [0-9]+(?=\s*:) ''', fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here we use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.split(';')[0].strip().lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m': 'f4m', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', 'quicktime': 'mov', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} split_codecs = list(filter(None, map( lambda s: s.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in split_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr) if not vcodec and not acodec: if len(split_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(split_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes.
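Editor's illustrative examples: is_html(b'\xef\xbb\xbf<html>') is truthy
because the UTF-8 BOM is stripped before testing for a leading '<', while
is_html(b'plain text') evaluates falsy.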
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') or m.group('intval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 
'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 
'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United 
States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers take care of wrapping the socket with SOCKS return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfuscated_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfuscated_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr =
chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.") #!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y 
%H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
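# --- Editor's illustrative usage (not part of the original module;
# --- 'abc.info.json' is a made-up target path) ---
# write_json_file({'id': 'abc', 'title': 'Some video'}, 'abc.info.json')
# The JSON is dumped to a NamedTemporaryFile in the target directory first
# and then renamed over the destination, so readers never observe a
# half-written file even if the process dies mid-dump.
# ----------------------------------------------------------------------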
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): return get_element_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_element_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), value), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. 
{ 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() parser.feed(html_element) parser.close() return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
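# --- Editor's illustrative examples (not part of the original module) ---
# unescapeHTML('&eacute;') -> 'é'         (named entity)
# unescapeHTML('&#233;')   -> 'é'         (decimal numeric reference)
# unescapeHTML('&#xE9;')   -> 'é'         (hexadecimal numeric reference)
# unescapeHTML('&bogus;')  -> '&bogus;'   (unknown entities are kept literal)
# ------------------------------------------------------------------------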
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(Exception): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(Exception): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
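# Illustrative example (header value invented): a raw 'Set-Cookie: name=caf\xe9'
# header can crash Python 2's cookie handling; percent-encoded as 'name=caf%E9'
# (the intent of the disabled workaround sketched below) it is processed safely.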
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
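# (Assumed examples: stream wrappers from IDEs or test harnesses that expose no
# real OS handle.) Returning False here makes write_string() fall back to its
# ordinary encoded-write path.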
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
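The data is JSON-encoded and appended to the URL as a
'#__youtubedl_smuggle=...' fragment; unsmuggle_url() reverses the process.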
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return title_bytes = title.encode('utf-8') buf = 
ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if not isinstance(base, compat_str) or not re.match(r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
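Returns False if the executable could not be spawned at all.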
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
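# Illustrative example (numbers invented): with a page size of 50, a page
# yielding only 37 entries is treated as the final page and no further page
# is requested.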
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) INTEGER_TABLE = ( (r'^(0[xX][0-9a-fA-F]+)\s*:?$', 16), (r'^(0+[0-7]+)\s*:?$', 8), ) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| /\*.*?\*/|//[^\n]*|,(?=\s*[\]}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?| [0-9]+(?=\s*:) ''', fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.split(';')[0].strip().lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m': 'f4m', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', 'quicktime': 'mov', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} splited_codecs = list(filter(None, map( lambda str: str.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in splited_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr) if not vcodec and not acodec: if len(splited_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(splited_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
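A leading byte-order mark, when present, selects the decoding used for the
check; otherwise UTF-8 (with replacement of invalid bytes) is assumed.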
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') or m.group('intval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 
'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 
'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United 
States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = 
chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.")
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-23-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-23-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-1
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_ctypes_WINFUNCTYPE, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', 
'%B %d %Y at %H:%M', '%B %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) 
</\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) def sanitize_url(url): # Prepend protocol-less URLs with `http:` scheme in order to mitigate # the number of unwanted failures due to missing protocol if url.startswith('//'): return 'http:%s' % url # Fix some common typos seen so far COMMON_TYPOS = ( # https://github.com/rg3/youtube-dl/issues/15649 (r'^httpss://', r'https://'), # https://bx1.be/lives/direct-tv/ (r'^rmtp([es]?)://', r'rtmp\1://'), ) for mistake, fixup in COMMON_TYPOS: if re.match(mistake, url): return re.sub(mistake, fixup, url) return url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs['strict'] = True hc = http_class(*args, **compat_kwargs(kwargs)) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) # Remove unrecognized timezones from ISO 8601 alike timestamps m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) if m: date_str = date_str[:-len(m.group('tz'))] # Python only supports microseconds, so remove nanoseconds m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str) if m: date_str = m.group(1) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if 
re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( ('GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( ('GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, 
exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(compat_shlex_quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
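    Illustrative example (added annotation, not in the original docstring):
    unsmuggle_url(smuggle_url('http://x/v', {'a': 1})) returns
    ('http://x/v', {'a': 1}); the payload travels in the URL fragment as
    '#__youtubedl_smuggle=...'.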
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def parse_resolution(s): if s is None: return {} mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s) if mobj: return { 'width': int(mobj.group('w')), 'height': int(mobj.group('h')), } mobj = re.search(r'\b(\d+)[pPiI]\b', s) if mobj: return {'height': int(mobj.group(1))} mobj = re.search(r'\b([48])[kK]\b', s) if mobj: return {'height': int(mobj.group(1)) * 540} return {} def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( 
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if isinstance(base, bytes): base = base.decode('utf-8') if not isinstance(base, compat_str) or not re.match( r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def bool_or_none(v, default=None): return v if isinstance(v, bool) else default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P? (?: [0-9]+\s*y(?:ears?)?\s* )? (?: [0-9]+\s*m(?:onths?)?\s* )? (?: [0-9]+\s*w(?:eeks?)?\s* )? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? T)? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? 
(?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=True): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def _multipart_encode_impl(data, boundary): content_type = 'multipart/form-data; boundary=%s' % boundary out = b'' for k, v in data.items(): out += b'--' + boundary.encode('ascii') + b'\r\n' if isinstance(k, compat_str): k = k.encode('utf-8') if isinstance(v, compat_str): v = v.encode('utf-8') # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578 # suggests sending 
UTF-8 directly. Firefox sends UTF-8, too content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n' if boundary.encode('ascii') in content: raise ValueError('Boundary overlaps with data') out += content out += b'--' + boundary.encode('ascii') + b'--\r\n' return out, content_type def multipart_encode(data, boundary=None): ''' Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578 ''' has_specified_boundary = boundary is not None while True: if boundary is None: boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff)) try: out, content_type = _multipart_encode_impl(data, boundary) break except ValueError: if has_specified_boundary: raise boundary = None return out, content_type def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): if not isinstance(getter, (list, tuple)): getter = [getter] for get in getter: try: v = get(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'''(?sx)^ (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+) (?:\s*&&\s*(?P=func_name))? \s*\(\s*(?P<callback_data>.*)\);? 
def strip_jsonp(code):
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)


def js_to_json(code):
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v == ',':
            return ""

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                return '"%d":' % i if v.endswith(':') else '%d' % i

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str
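
# Illustrative usage sketch for js_to_json: a relaxed JavaScript object
# literal (unquoted keys, single quotes, comments, hex integers, trailing
# commas) becomes strict JSON that json.loads accepts. The input literal and
# the helper name are made-up examples.
def _example_js_to_json():
    js = "{foo: 'bar', /* comment */ baz: 0x10,}"
    return json.loads(js_to_json(js))  # -> {'foo': 'bar', 'baz': 16}
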
def mimetype2ext(mt):
    if mt is None:
        return None

    ext = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
    }.get(res, res)


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    splited_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in splited_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        # Neither entry was recognized; with exactly two entries assume the
        # conventional "video, audio" order instead of returning None values
        if len(splited_codecs) == 2:
            return {
                'vcodec': splited_codecs[0],
                'acodec': splited_codecs[1],
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit
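
# Illustrative usage sketch for parse_codecs on RFC 6381 codecs attributes as
# found in HLS/DASH manifests. The codec strings and the helper name are
# representative made-up examples.
def _example_parse_codecs():
    assert parse_codecs('avc1.42E01E, mp4a.40.2') == {
        'vcodec': 'avc1.42E01E',
        'acodec': 'mp4a.40.2',
    }
    # Two unrecognized entries fall back to positional "video, audio" order
    # (a warning is written to stderr for each unknown codec)
    assert parse_codecs('foo, bar') == {'vcodec': 'foo', 'acodec': 'bar'}
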
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): ''' @param dfxp_data A bytes-like object containing DFXP data @returns A unicode object containing converted SRT data ''' LEGACY_NAMESPACES = ( (b'http://www.w3.org/ns/ttml', [ b'http://www.w3.org/2004/11/ttaf1', b'http://www.w3.org/2006/04/ttaf1', b'http://www.w3.org/2006/10/ttaf1', ]), (b'http://www.w3.org/ns/ttml#styling', [ b'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser(object): _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = compat_etree_fromstring(dfxp_data) out = [] paras = 
dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) if param is None: return [] assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 
'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 
'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 
'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': '84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 
'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': '49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. 
    If optional blocksize is given and greater than zero, pad the front of
    the byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc


def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted


def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfucasted_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfucasted_code)


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
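
# Illustrative usage sketch for encode_base_n and parse_m3u8_attributes;
# decode_packed_codes relies on encode_base_n to rebuild P.A.C.K.E.R. symbol
# keys. The attribute list below is a representative made-up value, and the
# helper name is arbitrary.
def _example_base_n_and_m3u8():
    # base-36 uses digits plus lowercase letters, so index 35 is 'z'
    assert encode_base_n(35, 36) == 'z'
    attrs = parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.64001f"')
    assert attrs == {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.64001f'}
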
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels


def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'youtube-dl requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)] +
                       [encodeArgument(o) for o in opts] +
                       [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. 
" "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): return { year_field: str(random.randint(1950, 1995)), month_field: str(random.randint(1, 12)), day_field: str(random.randint(1, 31)), } #!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_ctypes_WINFUNCTYPE, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth 
%Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', '%B %d %Y at %H:%M', '%B %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) 
</\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) def sanitize_url(url): # Prepend protocol-less URLs with `http:` scheme in order to mitigate # the number of unwanted failures due to missing protocol if url.startswith('//'): return 'http:%s' % url # Fix some common typos seen so far COMMON_TYPOS = ( # https://github.com/rg3/youtube-dl/issues/15649 (r'^httpss://', r'https://'), # https://bx1.be/lives/direct-tv/ (r'^rmtp([es]?)://', r'rtmp\1://'), ) for mistake, fixup in COMMON_TYPOS: if re.match(mistake, url): return re.sub(mistake, fixup, url) return url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs['strict'] = True hc = http_class(*args, **compat_kwargs(kwargs)) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
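        # (Illustrative: a response carrying 'Set-Cookie: name=caf\xc3\xa9'
        # could make the *next* request blow up under Python 2. The
        # percent-encoding workaround below is currently left commented out.)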
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) # Remove unrecognized timezones from ISO 8601 alike timestamps m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) if m: date_str = date_str[:-len(m.group('tz'))] # Python only supports microseconds, so remove nanoseconds m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str) if m: date_str = m.group(1) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if 
re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( ('GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( ('GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, 
exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(compat_shlex_quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
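
    Round-trip example (an illustrative doctest; the payload travels as
    URL-encoded JSON in the fragment):

        >>> url = smuggle_url('http://example.com/v', {'a': 1})
        >>> unsmuggle_url(url)
        ('http://example.com/v', {'a': 1})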
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def parse_resolution(s): if s is None: return {} mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s) if mobj: return { 'width': int(mobj.group('w')), 'height': int(mobj.group('h')), } mobj = re.search(r'\b(\d+)[pPiI]\b', s) if mobj: return {'height': int(mobj.group(1))} mobj = re.search(r'\b([48])[kK]\b', s) if mobj: return {'height': int(mobj.group(1)) * 540} return {} def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( 
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if isinstance(base, bytes): base = base.decode('utf-8') if not isinstance(base, compat_str) or not re.match( r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def bool_or_none(v, default=None): return v if isinstance(v, bool) else default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P? (?: [0-9]+\s*y(?:ears?)?\s* )? (?: [0-9]+\s*m(?:onths?)?\s* )? (?: [0-9]+\s*w(?:eeks?)?\s* )? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? T)? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? 
(?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=True): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
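            # (Illustrative: with a page size of 50, a page that yields only
            # 37 results has to be the last one, so fetching stops here.)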
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def _multipart_encode_impl(data, boundary): content_type = 'multipart/form-data; boundary=%s' % boundary out = b'' for k, v in data.items(): out += b'--' + boundary.encode('ascii') + b'\r\n' if isinstance(k, compat_str): k = k.encode('utf-8') if isinstance(v, compat_str): v = v.encode('utf-8') # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578 # suggests sending 
UTF-8 directly. Firefox sends UTF-8, too content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n' if boundary.encode('ascii') in content: raise ValueError('Boundary overlaps with data') out += content out += b'--' + boundary.encode('ascii') + b'--\r\n' return out, content_type def multipart_encode(data, boundary=None): ''' Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578 ''' has_specified_boundary = boundary is not None while True: if boundary is None: boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff)) try: out, content_type = _multipart_encode_impl(data, boundary) break except ValueError: if has_specified_boundary: raise boundary = None return out, content_type def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): if not isinstance(getter, (list, tuple)): getter = [getter] for get in getter: try: v = get(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'''(?sx)^ (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+) (?:\s*&&\s*(?P=func_name))? \s*\(\s*(?P<callback_data>.*)\);? 
\s*?(?://[^\n]*)*$''', r'\g<callback_data>', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
    }.get(res, res)


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    splited_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in splited_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        # Neither codec was recognized: with exactly two entries, assume the
        # conventional "video, audio" order and pass the raw strings through
        # (not the None placeholders). A single unrecognized codec is
        # ambiguous (audio or video?), so fall through to an empty dict.
        if len(splited_codecs) == 2:
            return {
                'vcodec': splited_codecs[0],
                'acodec': splited_codecs[1],
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes.
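
    Illustrative doctest (content is sniffed for a leading '<' after BOM
    handling):

        >>> bool(is_html(b'  <!DOCTYPE html><html></html>'))
        True
        >>> bool(is_html(b'GIF89a binary junk'))
        False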
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: (v is True) if isinstance(v, bool) else (v is not None), '!': lambda v: (v is False) if isinstance(v, bool) else (v is None), } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): ''' @param dfxp_data A bytes-like object containing DFXP data @returns A unicode object containing converted SRT data ''' LEGACY_NAMESPACES = ( (b'http://www.w3.org/ns/ttml', [ b'http://www.w3.org/2004/11/ttaf1', b'http://www.w3.org/2006/04/ttaf1', b'http://www.w3.org/2006/10/ttaf1', ]), (b'http://www.w3.org/ns/ttml#styling', [ b'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser(object): _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = compat_etree_fromstring(dfxp_data) out = [] paras = 
dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) if param is None: return [] assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 
'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 
'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 
'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': '84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 
'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': '49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. 
    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc


def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted


def pkcs1pad(data, length):
    """
    Pad input data using the PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n


# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]
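        # Each chunk is laid out as a 4-byte big-endian length, a 4-byte
        # type, <length> bytes of payload and a trailing 4-byte CRC; the
        # final slice above steps over the CRC, which is not verified here.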
# Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): return { year_field: str(random.randint(1950, 1995)), month_field: str(random.randint(1, 12)), day_field: str(random.randint(1, 31)), }
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-1-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-1-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-7
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import base64 import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import itertools import io import json import locale import math import operator import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_basestring, compat_chr, compat_html_entities, compat_http_client, compat_kwargs, compat_parse_qs, compat_socket_create_connection, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlparse, compat_urllib_request, compat_urlparse, shlex_quote, ) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) if val: assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! 
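        # Encoding the query to an ASCII bytestring below sidesteps that
        # quirk; the [@key=val] filter is then applied manually over the
        # findall() results.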
if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') for f in node.findall(xpath): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') n = node.find(xpath) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). 
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|\.$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return ('&%s;' % entity) def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, 
encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. 
""" if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. 
If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request new_req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) new_req.timeout = req.timeout req = new_req for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = 
self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname return self.do_open(functools.partial( _create_http_connection, self, self._https_conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
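        # Note: the escaping below is left commented out; percent-quoting
        # can itself alter otherwise valid cookie values, so the snippet is
        # kept for reference only.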
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None if timezone is None: m = re.search( r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str): date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?
    """
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A crude approximation: months and years are mapped onto fixed
        # numbers of days.
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, "%Y%m%d").date()


def hyphenate_date(date_str):
    """Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s", the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b"GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b"GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() 
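    # LockFileEx/UnlockFileEx describe the byte range to (un)lock through an
    # OVERLAPPED structure; zeroing Offset/OffsetHigh locks from the very
    # start of the file, and whole_low/whole_high cover its maximum length.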
overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and inofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = _UNIT_TABLE[m.group('unit')] return int(float(num_str) * mult) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return 
ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() m = re.match( r'''(?ix)(?:P?T)? (?: (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*| (?P<only_hours>[0-9.]+)\s*(?:hours?)| \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*| (?: (?: (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)? (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s* )? (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* )? (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)? 
)$''', s) if not m: return None res = 0 if m.group('only_mins'): return float_or_none(m.group('only_mins'), invscale=60) if m.group('only_hours'): return float_or_none(m.group('only_hours'), invscale=60 * 60) if m.group('secs'): res += int(m.group('secs')) if m.group('mins_reversed'): res += int(m.group('mins_reversed')) * 60 if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('hours_reversed'): res += int(m.group('hours_reversed')) * 60 * 60 if m.group('days'): res += int(m.group('days')) * 24 * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack('!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') def encode_dict(d, encoding='utf-8'): return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items()) try: etree_iter = xml.etree.ElementTree.Element.iter except AttributeError: # Python <=2.6 etree_iter = lambda n: n.findall('.//*') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) # Fix up XML parser in Python 2.x if sys.version_info < (3, 0): for n in etree_iter(tree): if n.text is not None: if not isinstance(n.text, compat_str): n.text = n.text.decode('utf-8') return tree US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def parse_age_limit(s): if s is None: return 
None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)


def strip_jsonp(code):
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)


def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            # JavaScript allows \' inside double-quoted strings, but JSON
            # does not, so un-escape single quotes while keeping the rest.
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)
        return '"%s"' % v

    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
        ''', fix_kv, code)
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
    return res


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns whether youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(shlex_quote(a) for a in args)


def mimetype2ext(mt):
    _, _, res = mt.rpartition('/')

    return {
        'x-ms-wmv': 'wmv',
        'x-mp4-fragmented': 'mp4',
        'ttml+xml': 'ttml',
    }.get(res, res)


def urlhandle_detect_ext(url_handle):
    try:
        url_handle.headers
        getheader = lambda h: url_handle.headers[h]
    except AttributeError:  # Python < 3
        getheader = url_handle.info().getheader

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes.
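    Honours a UTF-8/16/32 byte-order mark when decoding, then checks whether
    the decoded text starts (after optional whitespace) with a '<'.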
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return 0.0 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3)) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', }) def parse_node(node): str_or_empty = functools.partial(str_or_none, default='') out = str_or_empty(node.text) for child in node: if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): out += '\n' + str_or_empty(child.tail) elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'): out += str_or_empty(parse_node(child)) else: out += str_or_empty(xml.etree.ElementTree.tostring(child)) return out dfxp = xml.etree.ElementTree.fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib['begin']) end_time = parse_dfxp_time_expr(para.attrib.get('end')) if not end_time: end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur']) out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 
'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 
'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and 
Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())


class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
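# A minimal usage sketch for PerRequestProxyHandler (illustrative only; the
# proxy URL and target URL below are made-up, not part of the module). The
# handler honours a per-request 'Ytdl-request-proxy' header, and the
# '__noproxy__' sentinel disables proxying for that single request:
#
#   opener = compat_urllib_request.build_opener(
#       PerRequestProxyHandler({'http': 'http://proxy.example:3128'}))
#   req = compat_urllib_request.Request('http://example.com/')
#   req.add_header('Ytdl-request-proxy', '__noproxy__')  # bypass the proxy
#   opener.open(req)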
# In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) if val: assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') for f in node.findall(xpath): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') n = node.find(xpath) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. 
return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|\.$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return ('&%s;' % entity) def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. 
This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request new_req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) new_req.timeout = req.timeout req = new_req for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname return self.do_open(functools.partial( _create_http_connection, self, self._https_conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. # if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None if timezone is None: m = re.search( r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str): date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd 
%Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad aproximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: 
fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b"GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b"GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh 
ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
""" sdata = compat_urllib_parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and inofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = _UNIT_TABLE[m.group('unit')] return int(float(num_str) * mult) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * 
invscale / scale except ValueError: return default def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() m = re.match( r'''(?ix)(?:P?T)? (?: (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*| (?P<only_hours>[0-9.]+)\s*(?:hours?)| \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*| (?: (?: (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)? (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s* )? (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* )? (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)? )$''', s) if not m: return None res = 0 if m.group('only_mins'): return float_or_none(m.group('only_mins'), invscale=60) if m.group('only_hours'): return float_or_none(m.group('only_hours'), invscale=60 * 60) if m.group('secs'): res += int(m.group('secs')) if m.group('mins_reversed'): res += int(m.group('mins_reversed')) * 60 if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('hours_reversed'): res += int(m.group('hours_reversed')) * 60 * 60 if m.group('days'): res += int(m.group('days')) * 24 * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack('!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') def encode_dict(d, encoding='utf-8'): return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items()) try: etree_iter = xml.etree.ElementTree.Element.iter except AttributeError: # Python <=2.6 etree_iter = lambda n: n.findall('.//*') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) # Fix up XML parser in Python 2.x if sys.version_info < (3, 0): for n in etree_iter(tree): if n.text is not None: if not isinstance(n.text, compat_str): n.text = n.text.decode('utf-8') return tree US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def parse_age_limit(s): if s is None: return 
None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) return int(m.group('age')) if m else US_RATINGS.get(s, None) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v if v.startswith('"'): v = re.sub(r"\\'", "'", v[1:-1]) elif v.startswith("'"): v = v[1:-1] v = re.sub(r"\\\\|\\'|\"", lambda m: { '\\\\': '\\\\', "\\'": "'", '"': '\\"', }[m.group(0)], v) return '"%s"' % v res = re.sub(r'''(?x) "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'| [a-zA-Z_][.a-zA-Z_0-9]* ''', fix_kv, code) res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res) return res def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(shlex_quote(a) for a in args) def mimetype2ext(mt): _, _, res = mt.rpartition('/') return { 'x-ms-wmv': 'wmv', 'x-mp4-fragmented': 'mp4', 'ttml+xml': 'ttml', }.get(res, res) def urlhandle_detect_ext(url_handle): try: url_handle.headers getheader = lambda h: url_handle.headers[h] except AttributeError: # Python < 3 getheader = url_handle.info().getheader cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
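Editor's illustration of qualities() in use (the preference list here is hypothetical):

q = qualities(['240p', '360p', '720p'])
assert q('720p') == 2    # later in the list means better quality
assert q('240p') == 0
assert q('4k') == -1     # unknown ids sort below every known one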
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return 0.0 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3)) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', }) def parse_node(node): str_or_empty = functools.partial(str_or_none, default='') out = str_or_empty(node.text) for child in node: if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): out += '\n' + str_or_empty(child.tail) elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'): out += str_or_empty(parse_node(child)) else: out += str_or_empty(xml.etree.ElementTree.tostring(child)) return out dfxp = xml.etree.ElementTree.fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib['begin']) end_time = parse_dfxp_time_expr(para.attrib.get('end')) if not end_time: end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur']) out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 
'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 
'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and 
Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type)
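A usage sketch for the handler above (editor's addition; the proxy URL and target are hypothetical):

handler = PerRequestProxyHandler({'http': 'http://proxy.example:3128'})
opener = compat_urllib_request.build_opener(handler)
req = compat_urllib_request.Request('http://example.com/')
# Per-request override: '__noproxy__' makes this one request go direct.
req.add_header('Ytdl-request-proxy', '__noproxy__')
# opener.open(req)  # the header is consumed by proxy_open() above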
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-7-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-7-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-43
#!/usr/bin/env python # -*- coding: utf-8 -*- import ctypes import datetime import email.utils import errno import gzip import io import json import locale import math import os import pipes import platform import re import ssl import socket import subprocess import sys import traceback import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken def _unquote(string, encoding='utf-8', errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. 
string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = _unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = _unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3,0): def write_json_file(obj, fn): with open(fn, 'wb') as f: json.dump(obj, f) else: def write_json_file(obj, fn): with open(fn, 'w', encoding='utf-8') as f: json.dump(obj, f) if sys.version_info >= (2,7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z]+$', key) assert re.match(r'^[a-zA-Z0-9@\s]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def htmlentity_transform(matchobj): """Transforms an HTML entity to a character. 
This function receives a match object and is intended to be used with the re.sub() function. """ entity = matchobj.group(1) # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(u'(?u)#(x?\\d+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith(u'x'): base = 16 numstr = u'0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return (u'&%s;' % entity) compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class BaseHTMLParser(compat_html_parser.HTMLParser): def __init(self): compat_html_parser.HTMLParser.__init__(self) self.html = None def loads(self, html): self.html = html self.feed(html) self.close() class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute self.value = value self.result = None self.started = False self.depth = {} self.watch_startpos = False self.error_count = 0 BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1 self.goahead(1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value: self.result = [tag] self.started = True self.watch_startpos = True if self.started: if not tag in self.depth: self.depth[tag] = 0 self.depth[tag] += 1 def handle_endtag(self, tag): if self.started: if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0: self.started = False self.result.append(self.getpos()) def find_startpos(self, x): """Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id""" if self.watch_startpos: self.watch_startpos = False self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos def get_result(self): if self.result is None: return None if len(self.result) != 3: return None lines = self.html.split('\n') lines = lines[self.result[1][0]-1:self.result[2][0]] lines[0] = lines[0][self.result[1][1]:] if len(lines) == 1: lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] lines[-1] = lines[-1][:self.result[2][1]] return '\n'.join(lines).strip() # Hack for https://github.com/rg3/youtube-dl/issues/662 if sys.version_info < (2, 7, 3): AttrParser.parse_endtag = (lambda self, i: i + len("</scr'+'ipt>") if self.rawdata[i:].startswith("</scr'+'ipt>") else compat_html_parser.HTMLParser.parse_endtag(self, i)) def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" parser = AttrParser(attribute, value) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() class MetaParser(BaseHTMLParser): """ Modified HTMLParser that 
isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def unescapeHTML(s): """ @param s a string """ assert type(s) == type(u'') result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s) return result def encodeFilename(s): """ @param s The name of the file """ assert type(s) == type(u'') # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) return s else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3() else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv3) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. 
Make sure you are using the latest version; type youtube-dl -U to update.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
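Editor's sketch of the compression opt-out described in the docstring above (URL hypothetical):

req = compat_urllib_request.Request('http://example.com/big.bin')
# http_request() strips Accept-encoding and removes this marker header
# before the request goes out, so the server sends an uncompressed body.
req.add_header('Youtubedl-No-Compression', '1')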
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h,v in std_headers.items(): if h in req.headers: del req.headers[h] req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" upload_date = None #Replace commas date_str = date_str.replace(',',' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' (\+|-)[\d]*$', '', date_str) format_expressions = [ '%d %B %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S', '%d.%m.%Y %H:%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', ] for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except: pass return upload_date def determine_ext(url, default_ext=u'unknown_video'): guess = url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now'or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') #A bad aproximation? 
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day,day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def write_string(s, out=None): if out is None: out = sys.stderr assert type(s) == type(u'') if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr s = s.encode(preferredencoding(), 'ignore') out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) def get_cachedir(params={}): cache_root = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p 
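A sketch of the cross-platform entry point for the locking primitives above (editor's addition; locked_file is defined just below, the path is hypothetical):

with locked_file('/tmp/ydl.lock', 'a', encoding='utf-8') as f:
    f.write(u'one writer at a time\n')  # exclusive lock for 'a' and 'w' modes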
handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.lockf(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but include the latest evaluated element (the first element so that Not pred(e)) """ for e in seq: yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url): if not '#__youtubedl_smuggle' in smug_url: return smug_url, None url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def str_to_int(int_str): int_str = re.sub(r'[,\.]', u'', int_str) return int(int_str) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_all_ampersand(xml_str): """Replace all the '&' by '&amp;' in XML""" return xml_str.replace(u'&', u'&amp;') def setproctitle(title): assert isinstance(title, type(u'')) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title = title buf = ctypes.create_string_buffer(len(title) + 1) buf.value = title.encode('utf-8') try: libc.prctl(15, ctypes.byref(buf), 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def url_basename(url): m = re.match(r'(?:https?:|)//[^/]+/(?:[^/?#]+/)?([^/?#]+)/?(?:[?#]|$)', url) if not m: return u'' return m.group(1) 
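Editor's illustration of url_basename against the regex above (example URLs are hypothetical):

assert url_basename('https://example.com/dir/file.mp4?x=1') == 'file.mp4'
assert url_basename('https://example.com/') == ''   # no basename -> empty string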
#!/usr/bin/env python # -*- coding: utf-8 -*- import ctypes import datetime import email.utils import errno import gzip import io import json import locale import math import os import pipes import platform import re import ssl import socket import subprocess import sys import traceback import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken def _unquote(string, encoding='utf-8', errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. 
string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = _unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = _unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3,0): def write_json_file(obj, fn): with open(fn, 'wb') as f: json.dump(obj, f) else: def write_json_file(obj, fn): with open(fn, 'w', encoding='utf-8') as f: json.dump(obj, f) if sys.version_info >= (2,7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z]+$', key) assert re.match(r'^[a-zA-Z0-9@\s]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def htmlentity_transform(matchobj): """Transforms an HTML entity to a character. 
This function receives a match object and is intended to be used with the re.sub() function. """ entity = matchobj.group(1) # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(u'(?u)#(x?\\d+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith(u'x'): base = 16 numstr = u'0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return (u'&%s;' % entity) compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class BaseHTMLParser(compat_html_parser.HTMLParser): def __init(self): compat_html_parser.HTMLParser.__init__(self) self.html = None def loads(self, html): self.html = html self.feed(html) self.close() class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute self.value = value self.result = None self.started = False self.depth = {} self.watch_startpos = False self.error_count = 0 BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1 self.goahead(1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value: self.result = [tag] self.started = True self.watch_startpos = True if self.started: if not tag in self.depth: self.depth[tag] = 0 self.depth[tag] += 1 def handle_endtag(self, tag): if self.started: if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0: self.started = False self.result.append(self.getpos()) def find_startpos(self, x): """Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id""" if self.watch_startpos: self.watch_startpos = False self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos def get_result(self): if self.result is None: return None if len(self.result) != 3: return None lines = self.html.split('\n') lines = lines[self.result[1][0]-1:self.result[2][0]] lines[0] = lines[0][self.result[1][1]:] if len(lines) == 1: lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] lines[-1] = lines[-1][:self.result[2][1]] return '\n'.join(lines).strip() # Hack for https://github.com/rg3/youtube-dl/issues/662 if sys.version_info < (2, 7, 3): AttrParser.parse_endtag = (lambda self, i: i + len("</scr'+'ipt>") if self.rawdata[i:].startswith("</scr'+'ipt>") else compat_html_parser.HTMLParser.parse_endtag(self, i)) def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" parser = AttrParser(attribute, value) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() class MetaParser(BaseHTMLParser): """ Modified HTMLParser that 
isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars (os.path.join takes separate path components, so the generator must be unpacked) alt_filename = os.path.join(*( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) )) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?'
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def unescapeHTML(s): """ @param s a string """ assert type(s) == type(u'') result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s) return result def encodeFilename(s): """ @param s The name of the file """ assert type(s) == type(u'') # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) return s else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3() else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv3) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. 
Make sure you are using the latest version; type youtube-dl -U to update.' super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h,v in std_headers.items(): if h in req.headers: del req.headers[h] req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" upload_date = None #Replace commas date_str = date_str.replace(',',' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' (\+|-)[\d]*$', '', date_str) format_expressions = [ '%d %B %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S', '%d.%m.%Y %H:%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', ] for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except: pass return upload_date def determine_ext(url, default_ext=u'unknown_video'): guess = url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now'or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') #A bad aproximation? 
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day,day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def write_string(s, out=None): if out is None: out = sys.stderr assert type(s) == type(u'') if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr s = s.encode(preferredencoding(), 'ignore') out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) def get_cachedir(params={}): cache_root = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p 
handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.lockf(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but also include the first element e for which pred(e) is false """ for e in seq: yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url): if '#__youtubedl_smuggle' not in smug_url: return smug_url, None url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def str_to_int(int_str): int_str = re.sub(r'[,\.]', u'', int_str) return int(int_str) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except Exception: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_all_ampersand(xml_str): """Replace all '&' with '&amp;' in XML""" return xml_str.replace(u'&', u'&amp;') def setproctitle(title): assert isinstance(title, type(u'')) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') # the buffer must be sized in bytes, not characters buf = ctypes.create_string_buffer(len(title_bytes) + 1) buf.value = title_bytes try: libc.prctl(15, ctypes.byref(buf), 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def url_basename(url): m = re.match(r'(?:https?:|)//[^/]+/(?:[^?#]+/)?([^/?#]+)/?(?:[?#]|$)', url) if not m: return u'' return m.group(1)
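# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the youtube-dl
# source): a quick self-check of a few helpers defined above, guarded by
# __main__ so that importing the module stays side-effect free.
if __name__ == '__main__':
    # smuggle_url()/unsmuggle_url() round-trip arbitrary JSON-serializable
    # data through the URL fragment
    plain = u'http://example.com/video'
    smuggled = smuggle_url(plain, {u'referer': u'http://example.com/'})
    assert unsmuggle_url(smuggled) == (plain, {u'referer': u'http://example.com/'})
    # format_bytes() renders byte counts with binary (1024-based) suffixes
    assert format_bytes(1536) == u'1.50KiB'
    # url_basename() returns the last path component, ignoring query/fragment
    assert url_basename(u'http://example.com/path/video.mp4?x=1') == u'video.mp4'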
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-43-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-43-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-18
#!/usr/bin/env python # coding: utf-8 from __future__ import absolute_import, unicode_literals import collections import contextlib import copy import datetime import errno import fileinput import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import sys import time import tokenize import traceback import random from string import ascii_letters from .compat import ( compat_basestring, compat_cookiejar, compat_get_terminal_size, compat_http_client, compat_kwargs, compat_numeric_types, compat_os_name, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, compat_urllib_request_DataHandler, ) from .utils import ( age_restricted, args_to_str, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, determine_protocol, DownloadError, encode_compat_str, encodeFilename, error_to_compat_str, expand_path, ExtractorError, format_bytes, formatSeconds, GeoRestrictedError, int_or_none, ISO3166Utils, locked_file, make_HTTPS_handler, MaxDownloadsReached, PagedList, parse_filesize, PerRequestProxyHandler, platform_name, PostProcessingError, preferredencoding, prepend_extension, register_socks_protocols, render_table, replace_extension, SameFileError, sanitize_filename, sanitize_path, sanitize_url, sanitized_Request, std_headers, subtitles_filename, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLCookieProcessor, YoutubeDLHandler, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM3u8PP, FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ if compat_os_name == 'nt': import ctypes class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. As, given a video URL, the downloader doesn't know how to extract all the needed information, task that InfoExtractors do, it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object handles it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL process the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. ap_mso: Adobe Pass multiple-system operator identifier. ap_username: Multiple-system operator account username. ap_password: Multiple-system operator account password. 
usenetrc: Use netrc for authentication instead. verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. playlistrandom: Download playlist items in random order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use geo_verification_proxy: URL of the proxy to use for IP address verification on geo-restricted sites. 
(Experimental) socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fribidi debug_printtraffic:Print out sent and received HTTP traffic include_ads: Download ads as well default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Do not resolve URLs, return the immediate result. Pass in 'in_playlist' to only show this behavior for playlist items. postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See youtube_dl/postprocessor/__init__.py for a list. as well as any further keyword arguments for the postprocessor. progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. If status is one of "downloading" or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. * eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. merge_output_format: Extension to use when merging formats. fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: (Experimental) Client-side IP address to bind to. call_home: Boolean, true iff we are allowed to contact the youtube-dl servers for debugging. sleep_interval: Number of seconds to sleep before each download when used alone or a lower bound of a range for randomized sleep before each download (minimum possible number of seconds to sleep) when used along with max_sleep_interval. max_sleep_interval:Upper bound of a range for randomized sleep before each download (maximum possible number of seconds to sleep). Must only be used along with sleep_interval. Actual sleep time will be a random float from range [sleep_interval; max_sleep_interval]. listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called with the info_dict of every video. If it returns a message, the video is ignored. If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. geo_bypass: Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental) geo_bypass_country: Two-letter ISO 3166-1 alpha-2 country code that will be used for explicit geographic restriction bypassing via faking X-Forwarded-For HTTP header (experimental) The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call.
None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv if True, otherwise use ffmpeg/avconv if False, otherwise use downloader suggested by extractor if None. The following parameters are not used by YoutubeDL itself; they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args, hls_use_mpegts. The following options are used by the post processors: prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, otherwise prefer avconv. postprocessor_args: A list of additional command-line arguments for the postprocessor. """ _NUMERIC_FIELDS = set(( 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx', 'timestamp', 'upload_year', 'upload_month', 'upload_day', 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', 'track_number', 'disc_number', 'release_year', 'playlist_index', )) params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = { # Default parameters 'nocheckcertificate': False, } self.params.update(params) self.cache = Cache(self) def check_deprecated(param, option, suggestion): if self.params.get(param) is not None: self.report_warning( '%s is deprecated. Use %s instead.' % (option, suggestion)) return True return False if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'): if self.params.get('geo_verification_proxy') is None: self.params['geo_verification_proxy'] = self.params['cn_verification_proxy'] check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits') check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"') check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"') if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == errno.ENOENT: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround .
Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # Unicode filesystem API will throw errors (#1474, #13027) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) register_socks_protocols() def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) if not isinstance(ie, type): self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractor_classes(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if compat_os_name == 'nt': if ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += encode_compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '%dx?' 
% template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id' or k.endswith('_id'))) template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v)) for k, v in template_dict.items() if v is not None and not isinstance(v, (list, tuple, dict))) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) # For fields playlist_index and autonumber convert all occurrences # of %(field)s to %(field)0Nd for backward compatibility field_size_compat_map = { 'playlist_index': len(str(template_dict['n_entries'])), 'autonumber': autonumber_size, } FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s' mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl) if mobj: outtmpl = re.sub( FIELD_SIZE_COMPAT_RE, r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')], outtmpl) # Missing numeric fields used together with integer presentation types # in format specification will break the argument substitution since # string 'NA' is returned for missing fields. We will patch output # template for missing fields to meet string presentation type. for numeric_field in self._NUMERIC_FIELDS: if numeric_field not in template_dict: # As of [1] format syntax is: # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting FORMAT_RE = r'''(?x) (?<!%) % \({0}\) # mapping key (?:[#0\-+ ]+)? # conversion flags (optional) (?:\d+)? # minimum field width (optional) (?:\.\d+)? # precision (optional) [hlL]? # length modifier (optional) [diouxXeEfFgGcrs%] # conversion type ''' outtmpl = re.sub( FORMAT_RE.format(numeric_field), r'%({0})s'.format(numeric_field), outtmpl) # expand_path translates '%%' into '%' and '$$' into '$' # correspondingly that is not what we want since we need to keep # '%%' intact for template dict substitution step. Working around # with boundary-alike separator hack. sep = ''.join([random.choice(ascii_letters) for _ in range(32)]) outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep)) # outtmpl should be expand_path'ed before template dict substitution # because meta fields may contain env variables we don't want to # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and # title "Hello $PATH", we don't want `$PATH` to be expanded. 
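# Illustrative walk-through (editor's note, hypothetical template): with
# outtmpl = '100%% - %(title)s.%(ext)s', the replace above yields
# '100%<sep>% - %(title)s.%(ext)s', which expand_path() leaves intact because
# <sep> is a random 32-letter name, not a real variable; stripping <sep> below
# restores '100%% - %(title)s.%(ext)s', and the final '%' substitution then
# produces e.g. '100% - Hello.mp4'.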
filename = expand_path(outtmpl).replace(sep, '') % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = encodeFilename(filename, True).decode(preferredencoding()) return sanitize_path(filename) except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date') if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count') if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue ie = self.get_info_extractor(ie.ie_key()) if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except GeoRestrictedError as e: msg = e.msg if e.countries: msg += '\nThis video is available in %s.' % ', '.join( map(ISO3166Utils.short2full, e.countries)) msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.' self.report_error(msg) break except ExtractorError as e: # An error we somewhat expected self.report_error(compat_str(e), e.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): ie_result['url'] = sanitize_url(ie_result['url']) extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) # extract_info may return None when ignoreerrors is enabled and # extraction failed with an error, don't crash and return early # in this case if not info: return info force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url', 'ie_key'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) # Extracted info may not be a video result (i.e. # info.get('_type', 'video') != video) but rather an url or # url_transparent. 
In such cases outer metadata (from ie_result) # should be propagated to inner one (info). For this to happen # _type of info should be overridden with url_transparent. This # fixes issue from https://github.com/rg3/youtube-dl/pull/11163. if new_result.get('_type') == 'url': new_result['_type'] = 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type in ('playlist', 'multi_video'): # We process each entry in the playlist playlist = ie_result.get('title') or ie_result.get('id') self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend') # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items') playlistitems = None if playlistitems_str is not None: def iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( '[%s] playlist %s: Collected %d video ids (downloading %d of them)' % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] if self.params.get('playlistrandom', False): random.shuffle(entries) x_forwarded_for = ie_result.get('__x_forwarded_for_ip') for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) # This __x_forwarded_for_ip thing is a bit ugly but requires # minimal changes if x_forwarded_for: entry['__x_forwarded_for_ip'] = x_forwarded_for extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results self.to_screen('[download] Finished downloading playlist: %s' % playlist) return ie_result elif result_type == 
'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) $ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, '^=': lambda attr, value: attr.startswith(value), '$=': lambda attr, value: attr.endswith(value), '*=': lambda attr, value: value in attr, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? 
\s*(?P<value>[a-zA-Z0-9._-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _remove_unused_ops(tokens): # Remove operators that we don't use and join them with the surrounding strings # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9' ALLOWED_OPS = ('/', '+', ',', '(', ')') last_string, last_start, last_end, last_line = None, None, None, None for type, string, start, end, line in tokens: if type == tokenize.OP and string == '[': if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line # everything inside brackets will be handled by _parse_filter for type, string, start, end, line in tokens: yield type, string, start, end, line if type == tokenize.OP and string == ']': break elif type == tokenize.OP and string in ALLOWED_OPS: if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]: if not last_string: last_string = string last_start = start last_end = end else: last_string += string if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': if not current_selector: raise syntax_error('"," must follow a format selector', start) selectors.append(current_selector) current_selector = None elif string == '/': if not current_selector: raise syntax_error('"/" must follow a format selector', start) first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise 
syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) if not video_selector or not audio_selector: raise syntax_error('"+" must be between two format selectors', start) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(ctx): for f in fs: for format in f(ctx): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(ctx): for f in fs: picked_formats = list(f(ctx)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(ctx): formats = list(ctx['formats']) if not formats: return if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for extractors with incomplete formats (audio only (soundcloud) # or video only (imgur)) we will fallback to best/worst # {video,audio}-only format elif ctx['incomplete_formats']: yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return # Formats must be opposite (video+audio) if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none': self.report_error( 'Both formats %s and %s are video-only, you must specify "-f video+audio"' % (format_1, format_2)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), 
formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(ctx): for pair in itertools.product( video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(ctx): ctx_copy = copy.deepcopy(ctx) for _filter in filters: ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats'])) return selector_function(ctx_copy) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline))) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies if 'X-Forwarded-For' not in res: x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip') if x_forwarded_for_ip: res['X-Forwarded-For'] = x_forwarded_for_ip return res def _calc_cookies(self, info_dict): pr = sanitized_Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') def report_force_conversion(field, field_not, conversion): self.report_warning( '"%s" field is not %s - forcing %s conversion, there is an error in extractor' % (field, field_not, conversion)) def sanitize_string_field(info, string_field): field = info.get(string_field) if field is None or isinstance(field, compat_str): return report_force_conversion(string_field, 'a string', 'string') info[string_field] = compat_str(field) def sanitize_numeric_fields(info): for numeric_field in self._NUMERIC_FIELDS: field = info.get(numeric_field) if field is None or isinstance(field, compat_numeric_types): continue report_force_conversion(numeric_field, 'numeric', 'int') info[numeric_field] = int_or_none(field) sanitize_string_field(info_dict, 'id') sanitize_numeric_fields(info_dict) if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = 
info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference') if t.get('preference') is not None else -1, t.get('width') if t.get('width') is not None else -1, t.get('height') if t.get('height') is not None else -1, t.get('id') if t.get('id') is not None else '', t.get('url'))) for i, t in enumerate(thumbnails): t['url'] = sanitize_url(t['url']) if t.get('width') and t.get('height'): t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnail'] = sanitize_url(thumbnail) elif thumbnails: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass # Auto generate title fields corresponding to the *_number fields when missing # in order to always have clean titles. This is very common for TV series. for field in ('chapter', 'season', 'episode'): if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) subtitles = info_dict.get('subtitles') if subtitles: for _, subtitle in subtitles.items(): for subtitle_format in subtitle: if subtitle_format.get('url'): subtitle_format['url'] = sanitize_url(subtitle_format['url']) if subtitle_format.get('ext') is None: subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower() if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], subtitles, 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], subtitles, info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') def is_wellformed(f): url = f.get('url') valid_url = url and isinstance(url, compat_str) if not valid_url: self.report_warning( '"url" field is missing or empty - skipping format, ' 'there is an error in extractor') return valid_url # Filter out malformed formats for better extraction robustness formats = list(filter(is_wellformed, formats)) formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): sanitize_string_field(format, 'format_id') sanitize_numeric_fields(format) format['url'] = sanitize_url(format['url']) if format.get('format_id') is None: format['format_id'] = compat_str(i) else: # Sanitize format_id from characters used in format selector expression format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id']) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] 
formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if format.get('ext') is None: format['ext'] = determine_ext(format['url']).lower() # Automatically determine protocol if missing (useful for format # selection purposes) if format.get('protocol') is None: format['protocol'] = determine_protocol(format) # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # Remove private housekeeping stuff if '__x_forwarded_for_ip' in info_dict: del info_dict['__x_forwarded_for_ip'] # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' fields if the original info_dict list them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # which can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and not info_dict.get('is_live')): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) # While in format selection we may need to have an access to the original # format set in order to calculate some metrics or do some processing. # For now we need to be able to guess whether original formats provided # by extractor are incomplete or not (i.e. whether extractor provides only # video-only or audio-only formats) for proper formats selection for # extractors with such incomplete formats (see # https://github.com/rg3/youtube-dl/pull/5556). # Since formats may be filtered during format selection and may not match # the original formats the results may be incorrect. Thus original formats # or pre-calculated metrics should be passed to format selection routines # as well. # We will pass a context object containing all necessary additional data # instead of just formats. # This fixes incorrect format selection issue (see # https://github.com/rg3/youtube-dl/issues/10083). 
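        #
        # Illustrative sketch (not executed): a format spec such as
        # 'bestvideo[height<=480]+bestaudio/best' is compiled by
        # build_format_selector() above, and the resulting selector is then
        # called with a context dict of exactly the shape built below. For a
        # hypothetical audio-only extractor result that would be:
        #
        #     ctx = {
        #         'formats': [{'format_id': 'http-mp3', 'vcodec': 'none',
        #                      'acodec': 'mp3'}],
        #         'incomplete_formats': True,
        #     }
        #
        # in which case the plain 'best'/'worst' selectors fall back to the
        # audio-only formats instead of yielding nothing.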
incomplete_formats = ( # All formats are video-only or all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or # all formats are audio-only all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)) ctx = { 'formats': formats, 'incomplete_formats': incomplete_formats, } formats_to_download = list(format_selector(ctx)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
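        # The truncation above keeps titles (and any filename later derived
        # from them via prepare_filename()) to at most 200 characters:
        # 197 characters of title plus the three-character '...' marker.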
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + error_to_compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None:
                    sub_data = sub_info['data']
                else:
                    try:
                        sub_data = ie._download_webpage(
                            sub_info['url'], info_dict['id'], note=False)
                    except ExtractorError as err:
                        self.report_warning('Unable to download subtitle for "%s": %s' %
                                            (sub_lang, error_to_compat_str(err.cause)))
                        continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        # Use newline='' to prevent conversion of newline characters
                        # See https://github.com/rg3/youtube-dl/issues/10268
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
                            subfile.write(sub_data)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return

        if self.params.get('writeinfojson', False):
            infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(self.filter_requested_info(info_dict), infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return

        self._write_thumbnails(info_dict, filename)

        if not self.params.get('skip_download', False):
            try:
                def dl(name, info):
                    fd = get_suitable_downloader(info, self.params)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)

                if info_dict.get('requested_formats') is not None:
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self)
                    if not merger.available:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                                            'formats but ffmpeg or avconv are not installed.'
                                            ' The formats won\'t be merged.')
                    else:
                        postprocessors = [merger]

                    def compatible_formats(formats):
                        video, audio = formats
                        # Check extension
                        video_ext, audio_ext = video.get('ext'), audio.get('ext')
                        if video_ext and audio_ext:
                            COMPATIBLE_EXTS = (
                                ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
                                ('webm', ),
                            )
                            for exts in COMPATIBLE_EXTS:
                                if video_ext in exts and audio_ext in exts:
                                    return True
                        # TODO: Check acodec/vcodec
                        return False

                    filename_real_ext = os.path.splitext(filename)[1][1:]
                    filename_wo_ext = (
                        os.path.splitext(filename)[0]
                        if filename_real_ext == info_dict['ext']
                        else filename)
                    requested_formats = info_dict['requested_formats']
                    if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                        info_dict['ext'] = 'mkv'
                        self.report_warning(
                            'Requested formats are incompatible for merge and will be merged into mkv.')
                    # Ensure filename always has a correct extension for successful merge
                    filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
                    if os.path.exists(encodeFilename(filename)):
                        self.to_screen(
                            '[download] %s has already been downloaded and '
                            'merged' % filename)
                    else:
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            new_info.update(f)
                            fname = self.prepare_filename(new_info)
                            fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext'])
                            downloaded.append(fname)
                            partial_success = dl(fname, new_info)
                            success = success and partial_success
                        info_dict['__postprocessors'] = postprocessors
                        info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

        if success and filename != '-':
            # Fixup content
            fixup_policy = self.params.get('fixup')
            if fixup_policy is None:
                fixup_policy = 'detect_or_warn'

            INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'

            stretched_ratio = info_dict.get('stretched_ratio')
            if stretched_ratio is not None and stretched_ratio != 1:
                if fixup_policy == 'warn':
                    self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                        info_dict['id'], stretched_ratio))
                elif fixup_policy == 'detect_or_warn':
                    stretched_pp = FFmpegFixupStretchedPP(self)
                    if stretched_pp.available:
                        info_dict.setdefault('__postprocessors', [])
                        info_dict['__postprocessors'].append(stretched_pp)
                    else:
                        self.report_warning(
                            '%s: Non-uniform pixel ratio (%s). %s'
                            % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
                else:
                    assert fixup_policy in ('ignore', 'never')

            if (info_dict.get('requested_formats') is None and
                    info_dict.get('container') == 'm4a_dash'):
                if fixup_policy == 'warn':
                    self.report_warning(
                        '%s: writing DASH m4a. '
                        'Only some players support this container.' % info_dict['id'])
                elif fixup_policy == 'detect_or_warn':
                    fixup_pp = FFmpegFixupM4aPP(self)
                    if fixup_pp.available:
                        info_dict.setdefault('__postprocessors', [])
                        info_dict['__postprocessors'].append(fixup_pp)
                    else:
                        self.report_warning(
                            '%s: writing DASH m4a. '
                            'Only some players support this container.
%s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') if (info_dict.get('protocol') == 'm3u8_native' or info_dict.get('protocol') == 'm3u8' and self.params.get('hls_prefer_native')): if fixup_policy == 'warn': self.report_warning('%s: malformed AAC bitstream detected.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM3u8PP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: malformed AAC bitstream detected. %s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and outtmpl != '-' and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is 
None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '%dx?' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('language'): if res: res += ' ' res += '[%s] ' % fdict['language'] if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: if res: res += ', ' res += '%sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: self.to_screen('[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( 
['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ if isinstance(req, compat_basestring): req = sanitized_Request(req) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') if _LAZY_LOADER: self._write_string('[debug] Lazy loading extractors enabled' + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: opts_cookiefile = expand_path(opts_cookiefile) self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = YoutubeDLCookieProcessor(self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) data_handler = compat_urllib_request_DataHandler() # When passing our own FileHandler instance, build_opener won't add the # default FileHandler and allows us to disable the file protocol, which # can be used for malicious purposes (see # https://github.com/rg3/youtube-dl/issues/8227) file_handler = compat_urllib_request.FileHandler() def file_open(*args, **kwargs): raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons') file_handler.file_open = file_open opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' 
% (info_dict['extractor'], info_dict['id'], thumb_display_id))
                try:
                    uf = self.urlopen(t['url'])
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.report_warning('Unable to download thumbnail "%s": %s' %
                                        (t['url'], error_to_compat_str(err)))


#!/usr/bin/env python
# coding: utf-8

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random

from string import ascii_letters

from .compat import (
    compat_basestring,
    compat_cookiejar,
    compat_get_terminal_size,
    compat_http_client,
    compat_kwargs,
    compat_numeric_types,
    compat_os_name,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
)
from .utils import (
    age_restricted,
    args_to_str,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    determine_ext,
    determine_protocol,
    DownloadError,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    expand_path,
    ExtractorError,
    format_bytes,
    formatSeconds,
    GeoRestrictedError,
    int_or_none,
    ISO3166Utils,
    locked_file,
    make_HTTPS_handler,
    MaxDownloadsReached,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    platform_name,
    PostProcessingError,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    render_table,
    replace_extension,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    subtitles_filename,
    UnavailableVideoError,
    url_basename,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    get_postprocessor,
)
from .version import __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the actual
    video file and writing it to disk if the user has requested it, among
    some other tasks. In most cases there should be one per program. Given
    a video URL, the downloader doesn't know how to extract all the needed
    information (that is the InfoExtractors' task), so it has to pass the
    URL to one of them.

    For this, YoutubeDL objects have a method that allows InfoExtractors
    to be registered in a given order. When it is passed a URL, the
    YoutubeDL object hands it to the first InfoExtractor it finds that
    reports being able to handle it. The InfoExtractor extracts all the
    information about the video or videos the URL refers to, and YoutubeDL
    processes the extracted information, possibly using a File Downloader
    to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use.
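
    A minimal usage sketch (illustrative only; both options shown are
    optional and documented below, and the URL is just an example):

        from youtube_dl import YoutubeDL

        ydl_opts = {
            'format': 'bestvideo+bestaudio/best',
            'outtmpl': '%(title)s-%(id)s.%(ext)s',
        }
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])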
The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. ap_mso: Adobe Pass multiple-system operator identifier. ap_username: Multiple-system operator account username. ap_password: Multiple-system operator account password. usenetrc: Use netrc for authentication instead. verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. playlistrandom: Download playlist items in random order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. 
                       Videos already present in the file are not downloaded
                       again.
    cookiefile:        File name where cookies should be read from and dumped to.
    nocheckcertificate:Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    geo_verification_proxy:  URL of the proxy to use for IP address verification
                       on geo-restricted sites. (Experimental)
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               youtube_dl/postprocessor/__init__.py for a list.
                       as well as any further keyword arguments for the
                       postprocessor.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    (Experimental) Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       youtube-dl servers for debugging.
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval:Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental) geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for explicit geographic restriction bypassing via faking X-Forwarded-For HTTP header (experimental) The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv if True, otherwise use ffmpeg/avconv if False, otherwise use downloader suggested by extractor if None. The following parameters are not used by YoutubeDL itself, they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args, hls_use_mpegts. The following options are used by the post processors: prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, otherwise prefer avconv. postprocessor_args: A list of additional command-line arguments for the postprocessor. """ _NUMERIC_FIELDS = set(( 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx', 'timestamp', 'upload_year', 'upload_month', 'upload_day', 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', 'track_number', 'disc_number', 'release_year', 'playlist_index', )) params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = { # Default parameters 'nocheckcertificate': False, } self.params.update(params) self.cache = Cache(self) def check_deprecated(param, option, suggestion): if self.params.get(param) is not None: self.report_warning( '%s is deprecated. Use %s instead.' 
% (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber_size', '--autonumber-size',
                         'output template with %(autonumber)0Nd, where N is the number of digits')
        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32' and
                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
                not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        if isinstance(params.get('outtmpl'), bytes):
            self.report_warning(
                'Parameter outtmpl is bytes, but should be a unicode string. '
                'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)
        register_socks_protocols()

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['youtube-dl'] +
                [a for i, a in enumerate(argv) if i not in idxs] +
                ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s\n' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        if not isinstance(ie, type):
            self._ies_instances[ie.ie_key()] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key, it will try to get one from
        the _ies list, if there's no instance it will create a new one and add
        it to the extractor list.
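
        For example, get_info_extractor('Generic') returns the shared
        GenericIE instance, creating and registering it first if needed
        (extractor keys are the extractor class names minus the trailing
        'IE').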
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractor_classes(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if compat_os_name == 'nt': if ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += encode_compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '%dx?' 
                    % template_dict['width']

            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                                 for k, v in template_dict.items()
                                 if v is not None and not isinstance(v, (list, tuple, dict)))
            template_dict = collections.defaultdict(lambda: 'NA', template_dict)

            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)

            # For fields playlist_index and autonumber convert all occurrences
            # of %(field)s to %(field)0Nd for backward compatibility
            field_size_compat_map = {
                'playlist_index': len(str(template_dict['n_entries'])),
                'autonumber': autonumber_size,
            }
            FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
            mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
            if mobj:
                outtmpl = re.sub(
                    FIELD_SIZE_COMPAT_RE,
                    r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
                    outtmpl)

            # Missing numeric fields used together with integer presentation types
            # in format specification will break the argument substitution since
            # string 'NA' is returned for missing fields. We will patch output
            # template for missing fields to meet string presentation type.
            for numeric_field in self._NUMERIC_FIELDS:
                if numeric_field not in template_dict:
                    # As of [1] format syntax is:
                    #  %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
                    # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
                    FORMAT_RE = r'''(?x)
                        (?<!%)
                        %
                        \({0}\)  # mapping key
                        (?:[#0\-+ ]+)?  # conversion flags (optional)
                        (?:\d+)?  # minimum field width (optional)
                        (?:\.\d+)?  # precision (optional)
                        [hlL]?  # length modifier (optional)
                        [diouxXeEfFgGcrs%]  # conversion type
                    '''
                    outtmpl = re.sub(
                        FORMAT_RE.format(numeric_field),
                        r'%({0})s'.format(numeric_field), outtmpl)

            # expand_path translates '%%' into '%' and '$$' into '$'
            # correspondingly that is not what we want since we need to keep
            # '%%' intact for template dict substitution step. Working around
            # with boundary-alike separator hack.
            sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
            outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

            # outtmpl should be expand_path'ed before template dict substitution
            # because meta fields may contain env variables we don't want to
            # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
            # title "Hello $PATH", we don't want `$PATH` to be expanded.
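# Illustrative sketch (not part of YoutubeDL, added for clarity): the
# separator hack above in miniature. expand_path() collapses '%%' -> '%'
# and '$$' -> '$', so literal '%%' in the template is shielded with a
# random boundary token before environment expansion and restored just
# before the '%' template substitution. os.path.expandvars stands in for
# youtube-dl's expand_path here; render_outtmpl_sketch is a hypothetical
# helper name.
import os
import random
import string


def render_outtmpl_sketch(outtmpl, template_dict):
    sep = ''.join(random.choice(string.ascii_letters) for _ in range(32))
    # Shield literal '%%' and '$$' so the expansion below cannot touch them
    shielded = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
    expanded = os.path.expandvars(shielded)
    # Drop the boundary token, then substitute the template fields
    return expanded.replace(sep, '') % template_dict


# render_outtmpl_sketch('100%%-%(title)s.%(ext)s',
#                       {'title': 'Hello $PATH', 'ext': 'mp4'})
# -> '100%-Hello $PATH.mp4'; $PATH inside the title is never expanded,
# because env expansion happens before field substitution.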
filename = expand_path(outtmpl).replace(sep, '') % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = encodeFilename(filename, True).decode(preferredencoding()) return sanitize_path(filename) except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date') if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count') if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue ie = self.get_info_extractor(ie.ie_key()) if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except GeoRestrictedError as e: msg = e.msg if e.countries: msg += '\nThis video is available in %s.' % ', '.join( map(ISO3166Utils.short2full, e.countries)) msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.' self.report_error(msg) break except ExtractorError as e: # An error we somewhat expected self.report_error(compat_str(e), e.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): ie_result['url'] = sanitize_url(ie_result['url']) extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) # extract_info may return None when ignoreerrors is enabled and # extraction failed with an error, don't crash and return early # in this case if not info: return info force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) # Extracted info may not be a video result (i.e. # info.get('_type', 'video') != video) but rather an url or # url_transparent. 
In such cases outer metadata (from ie_result) # should be propagated to inner one (info). For this to happen # _type of info should be overridden with url_transparent. This # fixes issue from https://github.com/rg3/youtube-dl/pull/11163. if new_result.get('_type') == 'url': new_result['_type'] = 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type in ('playlist', 'multi_video'): # We process each entry in the playlist playlist = ie_result.get('title') or ie_result.get('id') self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend') # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items') playlistitems = None if playlistitems_str is not None: def iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( '[%s] playlist %s: Collected %d video ids (downloading %d of them)' % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] if self.params.get('playlistrandom', False): random.shuffle(entries) x_forwarded_for = ie_result.get('__x_forwarded_for_ip') for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) # This __x_forwarded_for_ip thing is a bit ugly but requires # minimal changes if x_forwarded_for: entry['__x_forwarded_for_ip'] = x_forwarded_for extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results self.to_screen('[download] Finished downloading playlist: %s' % playlist) return ie_result elif result_type == 
'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) $ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, '^=': lambda attr, value: attr.startswith(value), '$=': lambda attr, value: attr.endswith(value), '*=': lambda attr, value: value in attr, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? 
\s*(?P<value>[a-zA-Z0-9._-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _remove_unused_ops(tokens): # Remove operators that we don't use and join them with the surrounding strings # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9' ALLOWED_OPS = ('/', '+', ',', '(', ')') last_string, last_start, last_end, last_line = None, None, None, None for type, string, start, end, line in tokens: if type == tokenize.OP and string == '[': if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line # everything inside brackets will be handled by _parse_filter for type, string, start, end, line in tokens: yield type, string, start, end, line if type == tokenize.OP and string == ']': break elif type == tokenize.OP and string in ALLOWED_OPS: if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]: if not last_string: last_string = string last_start = start last_end = end else: last_string += string if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': if not current_selector: raise syntax_error('"," must follow a format selector', start) selectors.append(current_selector) current_selector = None elif string == '/': if not current_selector: raise syntax_error('"/" must follow a format selector', start) first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise 
syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) if not video_selector or not audio_selector: raise syntax_error('"+" must be between two format selectors', start) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(ctx): for f in fs: for format in f(ctx): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(ctx): for f in fs: picked_formats = list(f(ctx)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(ctx): formats = list(ctx['formats']) if not formats: return if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for extractors with incomplete formats (audio only (soundcloud) # or video only (imgur)) we will fallback to best/worst # {video,audio}-only format elif ctx['incomplete_formats']: yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return # Formats must be opposite (video+audio) if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none': self.report_error( 'Both formats %s and %s are video-only, you must specify "-f video+audio"' % (format_1, format_2)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), 
formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(ctx): for pair in itertools.product( video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(ctx): ctx_copy = copy.deepcopy(ctx) for _filter in filters: ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats'])) return selector_function(ctx_copy) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline))) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies if 'X-Forwarded-For' not in res: x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip') if x_forwarded_for_ip: res['X-Forwarded-For'] = x_forwarded_for_ip return res def _calc_cookies(self, info_dict): pr = sanitized_Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') def report_force_conversion(field, field_not, conversion): self.report_warning( '"%s" field is not %s - forcing %s conversion, there is an error in extractor' % (field, field_not, conversion)) def sanitize_string_field(info, string_field): field = info.get(string_field) if field is None or isinstance(field, compat_str): return report_force_conversion(string_field, 'a string', 'string') info[string_field] = compat_str(field) def sanitize_numeric_fields(info): for numeric_field in self._NUMERIC_FIELDS: field = info.get(numeric_field) if field is None or isinstance(field, compat_numeric_types): continue report_force_conversion(numeric_field, 'numeric', 'int') info[numeric_field] = int_or_none(field) sanitize_string_field(info_dict, 'id') sanitize_numeric_fields(info_dict) if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = 
info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference') if t.get('preference') is not None else -1, t.get('width') if t.get('width') is not None else -1, t.get('height') if t.get('height') is not None else -1, t.get('id') if t.get('id') is not None else '', t.get('url'))) for i, t in enumerate(thumbnails): t['url'] = sanitize_url(t['url']) if t.get('width') and t.get('height'): t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnail'] = sanitize_url(thumbnail) elif thumbnails: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass # Auto generate title fields corresponding to the *_number fields when missing # in order to always have clean titles. This is very common for TV series. for field in ('chapter', 'season', 'episode'): if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) subtitles = info_dict.get('subtitles') if subtitles: for _, subtitle in subtitles.items(): for subtitle_format in subtitle: if subtitle_format.get('url'): subtitle_format['url'] = sanitize_url(subtitle_format['url']) if subtitle_format.get('ext') is None: subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower() if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], subtitles, 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], subtitles, info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') def is_wellformed(f): url = f.get('url') valid_url = url and isinstance(url, compat_str) if not valid_url: self.report_warning( '"url" field is missing or empty - skipping format, ' 'there is an error in extractor') return valid_url # Filter out malformed formats for better extraction robustness formats = list(filter(is_wellformed, formats)) formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): sanitize_string_field(format, 'format_id') sanitize_numeric_fields(format) format['url'] = sanitize_url(format['url']) if format.get('format_id') is None: format['format_id'] = compat_str(i) else: # Sanitize format_id from characters used in format selector expression format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id']) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] 
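# Illustrative sketch (not part of YoutubeDL, added for clarity): the
# format_id disambiguation pass that follows, in isolation. Formats that
# share an id are suffixed '-0', '-1', ... so each one stays addressable
# from a format selector expression such as '-f hls-1'.
import collections


def dedupe_format_ids(formats):
    by_id = collections.defaultdict(list)
    for f in formats:
        by_id[f['format_id']].append(f)
    for format_id, ambiguous in by_id.items():
        if len(ambiguous) > 1:
            for i, f in enumerate(ambiguous):
                f['format_id'] = '%s-%d' % (format_id, i)
    return formats


# dedupe_format_ids([{'format_id': 'hls'}, {'format_id': 'hls'},
#                    {'format_id': 'http'}])
# -> ids become 'hls-0', 'hls-1' and 'http'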
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        for format_id, ambiguous_formats in formats_dict.items():
            if len(ambiguous_formats) > 1:
                for i, format in enumerate(ambiguous_formats):
                    format['format_id'] = '%s-%d' % (format_id, i)

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                )
            # Automatically determine file extension if missing
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            # Automatically determine protocol if missing (useful for format
            # selection purposes)
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)

        # Remove private housekeeping stuff
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        # TODO Central sorting goes here

        if formats[0] is not info_dict:
            # Only set the 'formats' field if the original info_dict lists them;
            # otherwise we end up with a circular reference: the first (and only)
            # element of the 'formats' field in info_dict would be info_dict
            # itself, which can't be exported to json.
            info_dict['formats'] = formats
        if self.params.get('listformats'):
            self.list_formats(info_dict)
            return

        req_format = self.params.get('format')
        if req_format is None:
            req_format_list = []
            if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
                    not info_dict.get('is_live')):
                merger = FFmpegMergerPP(self)
                if merger.available and merger.can_merge():
                    req_format_list.append('bestvideo+bestaudio')
            req_format_list.append('best')
            req_format = '/'.join(req_format_list)
        format_selector = self.build_format_selector(req_format)

        # While doing format selection we may need access to the original
        # format set in order to calculate some metrics or do some processing.
        # For now we need to be able to guess whether the formats provided by
        # an extractor are incomplete or not (i.e. whether the extractor
        # provides only video-only or only audio-only formats) so that format
        # selection works properly for such extractors (see
        # https://github.com/rg3/youtube-dl/pull/5556).
        # Since formats may be filtered during format selection and may no
        # longer match the original set, the results could be incorrect; the
        # original formats (or pre-calculated metrics) must therefore be passed
        # to the format selection routines as well. We do this with a context
        # object containing all necessary additional data instead of passing
        # just formats. This fixes an incorrect format selection issue (see
        # https://github.com/rg3/youtube-dl/issues/10083).
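# Illustrative sketch (not part of YoutubeDL, added for clarity): the
# "incomplete formats" test described in the comment above, on its own.
# A format list is incomplete when every entry is video-only or every
# entry is audio-only; a selector receiving ctx['incomplete_formats']
# then knows that 'best' may legitimately pick a single-stream format.
def formats_are_incomplete(formats):
    all_video_only = all(
        f.get('vcodec') != 'none' and f.get('acodec') == 'none'
        for f in formats)
    all_audio_only = all(
        f.get('vcodec') == 'none' and f.get('acodec') != 'none'
        for f in formats)
    return all_video_only or all_audio_only


# A SoundCloud-style audio-only extraction:
# formats_are_incomplete([{'vcodec': 'none', 'acodec': 'mp3'}]) -> True
# A complete muxed format:
# formats_are_incomplete([{'vcodec': 'h264', 'acodec': 'aac'}]) -> False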
        incomplete_formats = (
            # All formats are video-only or
            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
            # all formats are audio-only
            all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

        ctx = {
            'formats': formats,
            'incomplete_formats': incomplete_formats,
        }

        formats_to_download = list(format_selector(ctx))
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)

        if download:
            if len(formats_to_download) > 1:
                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
            for format in formats_to_download:
                new_info = dict(info_dict)
                new_info.update(format)
                self.process_info(new_info)
        # We update the info dict with the best quality format (backwards compatibility)
        info_dict.update(formats_to_download[-1])
        return info_dict

    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs = {}
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if (not self.params.get('writesubtitles') and not
                self.params.get('writeautomaticsub') or not
                available_subs):
            return None

        if self.params.get('allsubtitles', False):
            requested_langs = available_subs.keys()
        else:
            if self.params.get('subtitleslangs', False):
                requested_langs = self.params.get('subtitleslangs')
            elif 'en' in available_subs:
                requested_langs = ['en']
            else:
                requested_langs = [list(available_subs.keys())[0]]

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning('%s subtitles not available for %s' % (lang, video_id))
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs

    def process_info(self, info_dict):
        """Process a single resolved IE result."""

        assert info_dict.get('_type', 'video') == 'video'

        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads >= int(max_downloads):
                raise MaxDownloadsReached()

        info_dict['fulltitle'] = info_dict['title']
        if len(info_dict['title']) > 200:
            info_dict['title'] = info_dict['title'][:197] + '...'
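# Illustrative sketch (not part of YoutubeDL, added for clarity): the
# subtitle format preference chain used by process_subtitles above. A
# --sub-format spec such as 'srt/vtt/best' is tried left to right; 'best'
# (or no match at all) falls back to the extractor's last entry, which by
# convention is the most preferred one.
def pick_subtitle_format(formats, formats_query='best'):
    for ext in formats_query.split('/'):
        if ext == 'best':
            return formats[-1]
        matches = [f for f in formats if f['ext'] == ext]
        if matches:
            return matches[-1]
    # Nothing matched; the real code also emits a warning here
    return formats[-1]


# pick_subtitle_format([{'ext': 'vtt'}, {'ext': 'srt'}], 'srt/vtt')
# -> {'ext': 'srt'}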
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + error_to_compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None: sub_data = sub_info['data'] else: try: sub_data = ie._download_webpage( sub_info['url'], info_dict['id'], note=False) except ExtractorError as err: self.report_warning('Unable to download subtitle for "%s": %s' % (sub_lang, error_to_compat_str(err.cause))) continue try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format)) else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) # Use newline='' to prevent conversion of newline characters # See https://github.com/rg3/youtube-dl/issues/10268 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile: subfile.write(sub_data) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return if self.params.get('writeinfojson', False): infofn = replace_extension(filename, 'info.json', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): self.to_screen('[info] Video description metadata is already present') else: self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(self.filter_requested_info(info_dict), infofn) except (OSError, IOError): self.report_error('Cannot write metadata to JSON file ' + infofn) return self._write_thumbnails(info_dict, filename) if not self.params.get('skip_download', False): try: def dl(name, info): fd = get_suitable_downloader(info, self.params)(self, self.params) for ph in self._progress_hooks: fd.add_progress_hook(ph) if self.params.get('verbose'): self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) return fd.download(name, info) if info_dict.get('requested_formats') is not None: downloaded = [] success = True merger = FFmpegMergerPP(self) if not merger.available: postprocessors = [] self.report_warning('You have requested multiple ' 'formats but ffmpeg or avconv are not installed.' 
' The formats won\'t be merged.') else: postprocessors = [merger] def compatible_formats(formats): video, audio = formats # Check extension video_ext, audio_ext = audio.get('ext'), video.get('ext') if video_ext and audio_ext: COMPATIBLE_EXTS = ( ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'), ('webm') ) for exts in COMPATIBLE_EXTS: if video_ext in exts and audio_ext in exts: return True # TODO: Check acodec/vcodec return False filename_real_ext = os.path.splitext(filename)[1][1:] filename_wo_ext = ( os.path.splitext(filename)[0] if filename_real_ext == info_dict['ext'] else filename) requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') # Ensure filename always has a correct extension for successful merge filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' 'merged' % filename) else: for f in requested_formats: new_info = dict(info_dict) new_info.update(f) fname = self.prepare_filename(new_info) fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext']) downloaded.append(fname) partial_success = dl(fname, new_info) success = success and partial_success info_dict['__postprocessors'] = postprocessors info_dict['__files_to_merge'] = downloaded else: # Just a single file success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_error('unable to download video data: %s' % error_to_compat_str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success and filename != '-': # Fixup content fixup_policy = self.params.get('fixup') if fixup_policy is None: fixup_policy = 'detect_or_warn' INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.' stretched_ratio = info_dict.get('stretched_ratio') if stretched_ratio is not None and stretched_ratio != 1: if fixup_policy == 'warn': self.report_warning('%s: Non-uniform pixel ratio (%s)' % ( info_dict['id'], stretched_ratio)) elif fixup_policy == 'detect_or_warn': stretched_pp = FFmpegFixupStretchedPP(self) if stretched_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(stretched_pp) else: self.report_warning( '%s: Non-uniform pixel ratio (%s). %s' % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') if (info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash'): if fixup_policy == 'warn': self.report_warning( '%s: writing DASH m4a. ' 'Only some players support this container.' % info_dict['id']) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM4aPP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: writing DASH m4a. ' 'Only some players support this container. 
%s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') if (info_dict.get('protocol') == 'm3u8_native' or info_dict.get('protocol') == 'm3u8' and self.params.get('hls_prefer_native')): if fixup_policy == 'warn': self.report_warning('%s: malformed AAC bitstream detected.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM3u8PP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: malformed AAC bitstream detected. %s' % (info_dict['id'], INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and outtmpl != '-' and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is 
None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '%dx?' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('language'): if res: res += ' ' res += '[%s] ' % fdict['language'] if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: if res: res += ', ' res += '%sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: self.to_screen('[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( 
['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ if isinstance(req, compat_basestring): req = sanitized_Request(req) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') if _LAZY_LOADER: self._write_string('[debug] Lazy loading extractors enabled' + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: opts_cookiefile = expand_path(opts_cookiefile) self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = YoutubeDLCookieProcessor(self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) data_handler = compat_urllib_request_DataHandler() # When passing our own FileHandler instance, build_opener won't add the # default FileHandler and allows us to disable the file protocol, which # can be used for malicious purposes (see # https://github.com/rg3/youtube-dl/issues/8227) file_handler = compat_urllib_request.FileHandler() def file_open(*args, **kwargs): raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons') file_handler.file_open = file_open opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' 
% (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(encodeFilename(thumb_filename), 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], error_to_compat_str(err)))
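# --- Editor's illustration (not part of the original module): the thumbnail
# loop above reduces to "open the URL, stream the body to disk". A minimal
# standalone sketch under that assumption; `download_to_file` is a
# hypothetical helper, and plain urllib stands in for self.urlopen, so none
# of the proxy/cookie handling from _setup_opener applies here.
import shutil

try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2


def download_to_file(url, filename):
    """Stream `url` into `filename` without buffering the whole body."""
    uf = urlopen(url)
    with open(filename, 'wb') as f:
        shutil.copyfileobj(uf, f)  # same copy primitive _write_thumbnails uses

# e.g. download_to_file('https://example.com/thumb.jpg', 'thumb.jpg')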
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-18-fixed/youtube-dl/youtube_dl/YoutubeDL.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-18-buggy/youtube-dl/youtube_dl/YoutubeDL.py
youtube-dl-bug-13
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_cookiejar, compat_ctypes_WINFUNCTYPE, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', '%B %d %Y at %H:%M', '%B 
%d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" JSON_LD_RE = r'(?is)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>' def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
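# (Editor's note, illustrative: on Python 3.3+ a single os.replace(tf.name, fn)
#  would overwrite atomically on every platform; the unlink-then-rename dance
#  below is what keeps this compatible with Python 2.x on Windows.)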
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) 
</\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) def sanitize_url(url): # Prepend protocol-less URLs with `http:` scheme in order to mitigate # the number of unwanted failures due to missing protocol if url.startswith('//'): return 'http:%s' % url # Fix some common typos seen so far COMMON_TYPOS = ( # https://github.com/rg3/youtube-dl/issues/15649 (r'^httpss://', r'https://'), # https://bx1.be/lives/direct-tv/ (r'^rmtp([es]?)://', r'rtmp\1://'), ) for mistake, fixup in COMMON_TYPOS: if re.match(mistake, url): return re.sub(mistake, fixup, url) return url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
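# (Editor's examples of the mapping implemented below, for illustration:
#    unescapeHTML('&eacute;')  -> 'é'        named entity
#    unescapeHTML('&#233;')    -> 'é'        decimal numeric reference
#    unescapeHTML('&#xe9;')    -> 'é'        hexadecimal numeric reference
#    unescapeHTML('&bogus;')   -> '&bogus;'  unknown entities stay literal)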
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs['strict'] = True hc = http_class(*args, **compat_kwargs(kwargs)) source_address = ydl_handler._params.get('source_address') if source_address is not None: # This is to workaround _create_connection() from socket where it will try all # address data from getaddrinfo() including IPv6. This filters the result from # getaddrinfo() based on the source_address value. # This is based on the cpython socket.create_connection() function. # https://github.com/python/cpython/blob/master/Lib/socket.py#L691 def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): host, port = address err = None addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM) af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6 ip_addrs = [addr for addr in addrs if addr[0] == af] if addrs and not ip_addrs: ip_version = 'v4' if af == socket.AF_INET else 'v6' raise socket.error( "No remote IP%s addresses available for connect, can't use '%s' as source address" % (ip_version, source_address[0])) for res in ip_addrs: af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) sock.bind(source_address) sock.connect(sa) err = None # Explicitly break reference cycle return sock except socket.error as _: err = _ if sock is not None: sock.close() if err is not None: raise err else: raise socket.error('getaddrinfo returns an empty list') if hasattr(hc, '_create_connection'): hc._create_connection = _create_connection sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = _create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. 
This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # 
https://github.com/rg3/youtube-dl/issues/6457). if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar): def save(self, filename=None, ignore_discard=False, ignore_expires=False): # Store session cookies with `expires` set to 0 instead of an empty # string for cookie in self: if cookie.expires is None: cookie.expires = 0 compat_cookiejar.MozillaCookieJar.save(self, filename, ignore_discard, ignore_expires) def load(self, filename=None, ignore_discard=False, ignore_expires=False): compat_cookiejar.MozillaCookieJar.load(self, filename, ignore_discard, ignore_expires) # Session cookies are denoted by either `expires` field set to # an empty string or 0. MozillaCookieJar only recognizes the former # (see [1]). So we need force the latter to be recognized as session # cookies on our own. # Session cookies may be important for cookies-based authentication, # e.g. 
usually, when user does not check 'Remember me' check box while # logging in on a site, some important cookies are stored as session # cookies so that not recognizing them will result in failed login. # 1. https://bugs.python.org/issue17164 for cookie in self: # Treat `expires=0` cookies as session cookies if cookie.expires == 0: cookie.expires = None cookie.discard = True class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. # if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = 
re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) # Remove unrecognized timezones from ISO 8601 alike timestamps m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) if m: date_str = date_str[:-len(m.group('tz'))] # Python only supports microseconds, so remove nanoseconds m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str) if m: date_str = m.group(1) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None or '.' not in url: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? 
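# (Editor's note: months and years are approximated below as 30 and 365 days,
#  so e.g. 'now-1month' yields today - 30 days and 'today+2years' yields
#  today + 730 days; no exact calendar arithmetic is attempted.)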
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
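# (Editor's note: e.g. IDLE's pseudo stdout/stderr raise
#  io.UnsupportedOperation from fileno(); such streams are treated as
#  "not a console" and fall through to the generic writer in write_string.)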
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( ('GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( ('GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, 
exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(compat_shlex_quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def parse_resolution(s): if s is None: return {} mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s) if mobj: return { 'width': int(mobj.group('w')), 'height': int(mobj.group('h')), } mobj = re.search(r'\b(\d+)[pPiI]\b', s) if mobj: return {'height': int(mobj.group(1))} mobj = re.search(r'\b([48])[kK]\b', s) if mobj: return {'height': int(mobj.group(1)) * 540} return {} def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( 
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if isinstance(base, bytes): base = base.decode('utf-8') if not isinstance(base, compat_str) or not re.match( r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def bool_or_none(v, default=None): return v if isinstance(v, bool) else default def strip_or_none(v): return None if v is None else v.strip() def url_or_none(url): if not url or not isinstance(url, compat_str): return None url = url.strip() return url if re.match(r'^(?:[a-zA-Z][\da-zA-Z.+-]*:)?//', url) else None def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P? (?: [0-9]+\s*y(?:ears?)?\s* )? (?: [0-9]+\s*m(?:onths?)?\s* )? (?: [0-9]+\s*w(?:eeks?)?\s* )? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? T)? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? 
(?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=True): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
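# (Editor's worked example: with pagesize=50, getslice(0, 120) fetches pages
#  0, 1 and 2 and returns at most 120 items; a page shorter than pagesize is
#  taken to be the last one, so the break below stops further fetching.)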
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def _multipart_encode_impl(data, boundary): content_type = 'multipart/form-data; boundary=%s' % boundary out = b'' for k, v in data.items(): out += b'--' + boundary.encode('ascii') + b'\r\n' if isinstance(k, compat_str): k = k.encode('utf-8') if isinstance(v, compat_str): v = v.encode('utf-8') # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578 # suggests sending 
UTF-8 directly. Firefox sends UTF-8, too content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n' if boundary.encode('ascii') in content: raise ValueError('Boundary overlaps with data') out += content out += b'--' + boundary.encode('ascii') + b'--\r\n' return out, content_type def multipart_encode(data, boundary=None): ''' Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578 ''' has_specified_boundary = boundary is not None while True: if boundary is None: boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff)) try: out, content_type = _multipart_encode_impl(data, boundary) break except ValueError: if has_specified_boundary: raise boundary = None return out, content_type def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): if not isinstance(getter, (list, tuple)): getter = [getter] for get in getter: try: v = get(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def merge_dicts(*dicts): merged = {} for a_dict in dicts: for k, v in a_dict.items(): if v is None: continue if (k not in merged or (isinstance(v, compat_str) and v and isinstance(merged[k], compat_str) and not merged[k])): merged[k] = v return merged def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s) if m: return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)] return None def strip_jsonp(code): return re.sub( r'''(?sx)^ (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*) (?:\s*&&\s*(?P=func_name))? \s*\(\s*(?P<callback_data>.*)\);? 
        \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)


def js_to_json(code):
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v == ',':
            return ""

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                return '"%d":' % i if v.endswith(':') else '%d' % i

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str
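# A minimal usage sketch for js_to_json() and qualities() above (editor's
# addition, not part of the original module). The expected outputs follow
# directly from fix_kv/q as written; the helper name _js_to_json_examples is
# hypothetical and the function is never called at import time.
def _js_to_json_examples():
    # Unquoted keys and single-quoted values become valid JSON
    assert js_to_json("{abc: 'def'}") == '{"abc": "def"}'
    # Trailing commas before a closing bracket are dropped
    assert js_to_json('{"a": 1,}') == '{"a": 1}'
    # Hexadecimal (and octal) integer keys are rewritten in decimal
    assert js_to_json('{0x10: "x"}') == '{"16": "x"}'
    # qualities() ranks a format id by its position in the preference list
    q = qualities(['240p', '480p', '720p'])
    assert q('720p') == 2 and q('unknown') == -1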
def mimetype2ext(mt):
    if mt is None:
        return None

    ext = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
    }.get(res, res)


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    splited_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in splited_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(splited_codecs) == 2:
            # Neither entry was recognized: with exactly two codecs, assume
            # the conventional "video, audio" order instead of returning two
            # None values
            return {
                'vcodec': splited_codecs[0],
                'acodec': splited_codecs[1],
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes.
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: (v is True) if isinstance(v, bool) else (v is not None), '!': lambda v: (v is False) if isinstance(v, bool) else (v is None), } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): ''' @param dfxp_data A bytes-like object containing DFXP data @returns A unicode object containing converted SRT data ''' LEGACY_NAMESPACES = ( (b'http://www.w3.org/ns/ttml', [ b'http://www.w3.org/2004/11/ttaf1', b'http://www.w3.org/2006/04/ttaf1', b'http://www.w3.org/2006/10/ttaf1', ]), (b'http://www.w3.org/ns/ttml#styling', [ b'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'xml': 'http://www.w3.org/XML/1998/namespace', 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser(object): _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = 
compat_etree_fromstring(dfxp_data) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') or style.get(_x('xml:id')) if not style_id: continue parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) if param is None: return [] assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'iw': 'heb', # Replaced by he in 1989 revision 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'in': 'ind', # Replaced by id in 1989 revision 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 
'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'ji': 'yid', # Replaced by yi in 1989 revision 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 
'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full 
name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': '84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': 
'46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': '49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code_or_block): if len(code_or_block) == 2: block = cls._country_ip_map.get(code_or_block.upper()) if not block: return None else: block = code_or_block addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # 
https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. """ # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = compat_struct_pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0 length = len(s) if length % 4: extra = (4 - length % 4) s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] return acc def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], 
x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. 
' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): start_date = datetime.date(1950, 1, 1) end_date = datetime.date(1995, 12, 31) offset = random.randint(0, (end_date - start_date).days) random_date = start_date + datetime.timedelta(offset) return { year_field: str(random_date.year), month_field: str(random_date.month), day_field: str(random_date.day), } #!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_cookiejar, compat_ctypes_WINFUNCTYPE, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are 
not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', '%B %d %Y at %H:%M', '%B %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" JSON_LD_RE = r'(?is)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>' def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. 
""" try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def 
get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). 
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) def sanitize_url(url): # Prepend protocol-less URLs with `http:` scheme in order to mitigate # the number of unwanted failures due to missing protocol if url.startswith('//'): return 'http:%s' % url # Fix some common typos seen so far COMMON_TYPOS = ( # https://github.com/rg3/youtube-dl/issues/15649 (r'^httpss://', r'https://'), # https://bx1.be/lives/direct-tv/ (r'^rmtp([es]?)://', r'rtmp\1://'), ) for mistake, fixup in COMMON_TYPOS: if re.match(mistake, url): return re.sub(mistake, fixup, url) return url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res 
def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = 
ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. 
This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                               source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.'
in source_address[0] else socket.AF_INET6 ip_addrs = [addr for addr in addrs if addr[0] == af] if addrs and not ip_addrs: ip_version = 'v4' if af == socket.AF_INET else 'v6' raise socket.error( "No remote IP%s addresses available for connect, can't use '%s' as source address" % (ip_version, source_address[0])) for res in ip_addrs: af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) sock.bind(source_address) sock.connect(sa) err = None # Explicitly break reference cycle return sock except socket.error as _: err = _ if sock is not None: sock.close() if err is not None: raise err else: raise socket.error('getaddrinfo returns an empty list') if hasattr(hc, '_create_connection'): hc._create_connection = _create_connection sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = _create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. 
http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if it changed after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
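        # For example (illustrative only): a server replying with
        #     Location: http://example.com/caf%C3%A9
        # already satisfies RFC 3986, but a raw
        #     Location: http://example.com/café
        # is escaped below to the former before the redirect is followed.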
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar): def save(self, filename=None, ignore_discard=False, ignore_expires=False): # Store session cookies with `expires` set to 0 instead of an empty # string for cookie in self: if cookie.expires is None: cookie.expires = 0 compat_cookiejar.MozillaCookieJar.save(self, filename, ignore_discard, ignore_expires) def load(self, filename=None, ignore_discard=False, ignore_expires=False): compat_cookiejar.MozillaCookieJar.load(self, filename, ignore_discard, ignore_expires) # Session cookies are denoted by either `expires` field set to # an empty string or 0. MozillaCookieJar only recognizes the former # (see [1]). So we need force the latter to be recognized as session # cookies on our own. # Session cookies may be important for cookies-based authentication, # e.g. 
usually, when user does not check 'Remember me' check box while # logging in on a site, some important cookies are stored as session # cookies so that not recognizing them will result in failed login. # 1. https://bugs.python.org/issue17164 for cookie in self: # Treat `expires=0` cookies as session cookies if cookie.expires == 0: cookie.expires = None cookie.discard = True class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. # if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = 
re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601-like timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600


def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
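        # Months and years vary in length, so they are normalized to 30 and
        # 365 days below; e.g. 'today-1month' is treated as 30 days ago.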
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
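        # (seen e.g. with stream objects that define fileno() but raise
        # io.UnsupportedOperation when it is called; treat them as virtual
        # and fall back to the regular write path)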
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( ('GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = compat_ctypes_WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( ('GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, 
exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(compat_shlex_quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def parse_resolution(s): if s is None: return {} mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s) if mobj: return { 'width': int(mobj.group('w')), 'height': int(mobj.group('h')), } mobj = re.search(r'\b(\d+)[pPiI]\b', s) if mobj: return {'height': int(mobj.group(1))} mobj = re.search(r'\b([48])[kK]\b', s) if mobj: return {'height': int(mobj.group(1)) * 540} return {} def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( 
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path): return path if isinstance(base, bytes): base = base.decode('utf-8') if not isinstance(base, compat_str) or not re.match( r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def bool_or_none(v, default=None): return v if isinstance(v, bool) else default def strip_or_none(v): return None if v is None else v.strip() def url_or_none(url): if not url or not isinstance(url, compat_str): return None url = url.strip() return url if re.match(r'^(?:[a-zA-Z][\da-zA-Z.+-]*:)?//', url) else None def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P? (?: [0-9]+\s*y(?:ears?)?\s* )? (?: [0-9]+\s*m(?:onths?)?\s* )? (?: [0-9]+\s*w(?:eeks?)?\s* )? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? T)? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? 
(?:
                (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
            )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms)
    return duration


def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if youtube-dl is run in the background.
        # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())


class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            self._cache = {}

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = None
            if self._use_cache:
                page_results = self._cache.get(pagenum)
            if page_results is None:
                page_results = list(self._pagefunc(pagenum))
            if self._use_cache:
                self._cache[pagenum] = page_results

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", i.e. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
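            # For example (illustrative): with a page size of 20, a page that
            # yields only 7 results must be the final page, so iteration can
            # stop here without fetching further pages.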
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def _multipart_encode_impl(data, boundary): content_type = 'multipart/form-data; boundary=%s' % boundary out = b'' for k, v in data.items(): out += b'--' + boundary.encode('ascii') + b'\r\n' if isinstance(k, compat_str): k = k.encode('utf-8') if isinstance(v, compat_str): v = v.encode('utf-8') # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578 # suggests sending 
UTF-8 directly. Firefox sends UTF-8, too content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n' if boundary.encode('ascii') in content: raise ValueError('Boundary overlaps with data') out += content out += b'--' + boundary.encode('ascii') + b'--\r\n' return out, content_type def multipart_encode(data, boundary=None): ''' Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578 ''' has_specified_boundary = boundary is not None while True: if boundary is None: boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff)) try: out, content_type = _multipart_encode_impl(data, boundary) break except ValueError: if has_specified_boundary: raise boundary = None return out, content_type def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): if not isinstance(getter, (list, tuple)): getter = [getter] for get in getter: try: v = get(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def merge_dicts(*dicts): merged = {} for a_dict in dicts: for k, v in a_dict.items(): if v is None: continue if (k not in merged or (isinstance(v, compat_str) and v and isinstance(merged[k], compat_str) and not merged[k])): merged[k] = v return merged def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s) if m: return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)] return None def strip_jsonp(code): return re.sub( r'''(?sx)^ (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*) (?:\s*&&\s*(?P=func_name))? \s*\(\s*(?P<callback_data>.*)\);? 
\s*?(?://[^\n]*)*$''', r'\g<callback_data>', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
    }.get(res, res)


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    splited_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in splited_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        # Neither entry was recognized; if exactly two codecs were listed,
        # assume the conventional "video codec, audio codec" order
        if len(splited_codecs) == 2:
            return {
                'vcodec': splited_codecs[0],
                'acodec': splited_codecs[1],
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes.
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: (v is True) if isinstance(v, bool) else (v is not None), '!': lambda v: (v is False) if isinstance(v, bool) else (v is None), } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): ''' @param dfxp_data A bytes-like object containing DFXP data @returns A unicode object containing converted SRT data ''' LEGACY_NAMESPACES = ( (b'http://www.w3.org/ns/ttml', [ b'http://www.w3.org/2004/11/ttaf1', b'http://www.w3.org/2006/04/ttaf1', b'http://www.w3.org/2006/10/ttaf1', ]), (b'http://www.w3.org/ns/ttml#styling', [ b'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'xml': 'http://www.w3.org/XML/1998/namespace', 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser(object): _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = 
compat_etree_fromstring(dfxp_data) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') or style.get(_x('xml:id')) if not style_id: continue parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) if param is None: return [] assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'iw': 'heb', # Replaced by he in 1989 revision 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'in': 'ind', # Replaced by id in 1989 revision 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 
'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'ji': 'yid', # Replaced by yi in 1989 revision 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 
'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full 
name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': '84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': 
'46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': '49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code_or_block): if len(code_or_block) == 2: block = cls._country_ip_map.get(code_or_block.upper()) if not block: return None else: block = code_or_block addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # 
https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. """ # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = compat_struct_pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0 length = len(s) if length % 4: extra = (4 - length % 4) s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] return acc def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfuscated_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfuscated_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)],
x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. 
' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # We are on Unix and could not find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): start_date = datetime.date(1950, 1, 1) end_date = datetime.date(1995, 12, 31) offset = random.randint(0, (end_date - start_date).days) random_date = start_date + datetime.timedelta(offset) return { year_field: str(random_date.year), month_field: str(random_date.month), day_field: str(random_date.day), }
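# A minimal usage sketch for a few of the helpers defined above; the expected
# values below follow directly from the definitions and are illustrative only:
#
#     >>> encode_base_n(255, 16)
#     'ff'
#     >>> ISO639Utils.short2long('en')
#     'eng'
#     >>> ISO3166Utils.short2full('de')
#     'Germany'
#     >>> srt_subtitles_timecode(3661.5)
#     '01:01:01,500'
#     >>> cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#     ['--no-check-certificate', 'true']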
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-13-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-13-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-20
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = 
list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return 
n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. 
It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return 
compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def 
bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
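# For illustration only (hypothetical cookie value; the actual workaround is
# the commented-out block below): on Python 2 the escaping would amount to
#     compat_urllib_parse.quote('name=M\xc3\xbcnchen', b"%/;:@&=+$,!~*'()?#[] ")
# which yields 'name=M%C3%BCnchen' -- non-ASCII bytes are percent-encoded
# while characters in the safe set are kept as-is.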
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) # Remove unrecognized timezones from ISO 8601 alike timestamps m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) if m: date_str = date_str[:-len(m.group('tz'))] for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def 
subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070
    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:
            # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '')
            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)
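# The two byte helpers above are inverses of each other (sample values ours):
#
#     bytes_to_intlist(b'ab')     == [97, 98]
#     intlist_to_bytes([97, 98])  == b'ab'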
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)
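# Usage sketch (file name and argument values are ours):
#
#     with locked_file('queue.txt', 'a', encoding='utf-8') as f:
#         f.write('line\n')   # written while holding an exclusive lock
#
#     shell_quote(['ffmpeg', '-i', 'in file.mp4'])  == "ffmpeg -i 'in file.mp4'"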
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
def setproctitle(title):
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#&]+/', url).group()


def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:https?:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'PUT'


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except ValueError:
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except ValueError:
        return default


def strip_or_none(v):
    return None if v is None else v.strip()
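# Quick reference for the coercion helpers above (sample values ours):
#
#     int_or_none('42')                  == 42
#     int_or_none('', default=0)         == 0
#     float_or_none('11.5', invscale=2)  == 23.0
#     urljoin('http://foo.de/', '/a')    == 'http://foo.de/a'
#     remove_quotes('"abc"')             == 'abc'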
def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        m = re.match(
            r'''(?ix)(?:P?T)?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms)
    return duration


def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if youtube-dl is run in the background.
        # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized
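# Sample values for the duration/extension helpers above (ours):
#
#     parse_duration('9:54:11')            == 35651
#     parse_duration('3h 11m 53s')         == 11513
#     prepend_extension('abc.mp4', 'temp') == 'abc.temp.mp4'
#     replace_extension('abc.mp4', 'temp') == 'abc.temp'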
class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())


class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize, use_cache=False):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            self._cache = {}

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = None
            if self._use_cache:
                page_results = self._cache.get(pagenum)
            if page_results is None:
                page_results = list(self._pagefunc(pagenum))
            if self._use_cache:
                self._cache[pagenum] = page_results

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


def update_url_query(url, query):
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))


def update_Request(req, url=None, data=None, headers={}, query={}):
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
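# Sketch of the paged-list machinery above; the page function and sizes are
# our own example, not part of the module:
#
#     pages = OnDemandPagedList(lambda n: range(n * 3, (n + 1) * 3), 3)
#     pages.getslice(2, 7)  == [2, 3, 4, 5, 6]
#
# update_url_query() merges into the existing query string (parameter order
# follows dict order and may vary):
#
#     update_url_query('http://example.com/path', {'quality': ['HD']})
#         == 'http://example.com/path?quality=HD'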
def _multipart_encode_impl(data, boundary):
    content_type = 'multipart/form-data; boundary=%s' % boundary

    out = b''
    for k, v in data.items():
        out += b'--' + boundary.encode('ascii') + b'\r\n'
        if isinstance(k, compat_str):
            k = k.encode('utf-8')
        if isinstance(v, compat_str):
            v = v.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary.encode('ascii') in content:
            raise ValueError('Boundary overlaps with data')
        out += content

    out += b'--' + boundary.encode('ascii') + b'--\r\n'

    return out, content_type


def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type


def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            if key not in d or d[key] is None or skip_false_values and not d[key]:
                continue
            return d[key]
        return default
    return d.get(key_or_keys, default)


def try_get(src, getter, expected_type=None):
    if not isinstance(getter, (list, tuple)):
        getter = [getter]
    for get in getter:
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            pass
        else:
            if expected_type is None or isinstance(v, expected_type):
                return v


def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}


def parse_age_limit(s):
    if type(s) == int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    if s in US_RATINGS:
        return US_RATINGS[s]
    return TV_PARENTAL_GUIDELINES.get(s)
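# Worked example for the form-data encoder above (boundary and field values
# are ours; a random boundary is generated when none is given):
#
#     multipart_encode({'field': 'value'}, boundary='AAA') == (
#         b'--AAA\r\nContent-Disposition: form-data; name="field"\r\n\r\n'
#         b'value\r\n--AAA--\r\n',
#         'multipart/form-data; boundary=AAA')
#
#     dict_get({'a': None, 'b': 2}, ('a', 'b'))  == 2   (None values skipped)
#     parse_age_limit('PG-13')                   == 13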
        \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)


def js_to_json(code):
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v == ',':
            return ""

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                return '"%d":' % i if v.endswith(':') else '%d' % i

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        [a-zA-Z_][.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str
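# Illustrative inputs for the JSONP/JS cleaners above (sample values ours):
#
#     strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc') == '{"STATUS":"OK"}'
#     js_to_json("{'a': 1}")          == '{"a": 1}'
#     js_to_json('{"abc": "def",}')   == '{"abc": "def"}'   (trailing comma dropped)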
def mimetype2ext(mt):
    if mt is None:
        return None

    ext = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
    }.get(res, res)


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    splited_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in splited_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(splited_codecs) == 2:
            return {
                'vcodec': vcodec,
                'acodec': acodec,
            }
        elif len(splited_codecs) == 1:
            return {
                'vcodec': 'none',
                'acodec': vcodec,
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. 
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): LEGACY_NAMESPACES = ( ('http://www.w3.org/ns/ttml', [ 'http://www.w3.org/2004/11/ttaf1', 'http://www.w3.org/2006/04/ttaf1', 'http://www.w3.org/2006/10/ttaf1', ]), ('http://www.w3.org/ns/ttml#styling', [ 'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser(object): _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while 
True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 
'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 
'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': 
'84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': 
'49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. 
""" # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = compat_struct_pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0 length = len(s) if length % 4: extra = (4 - length % 4) s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] return acc def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height 
= unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): return { year_field: str(random.randint(1950, 1995)), month_field: str(random.randint(1, 12)), day_field: str(random.randint(1, 31)), } #!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import gzip import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', 
'%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
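            # Illustrative note (added; not in the original source):
            # write_json_file() is typically called like this, writing
            # atomically via a temporary file in the same directory:
            #   >>> write_json_file({'id': 'abc', 'title': 'xyz'}, 'info.json')
            # On POSIX the final os.rename() is atomic; on Windows the target
            # must be unlinked first, which is what the block below handles.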
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) 
</\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() try: parser.feed(html_element) parser.close() # Older Python may throw HTMLParseError in case of malformed HTML except compat_HTMLParseError: pass return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def expand_path(s): """Expand shell variables and ~""" return os.path.expandvars(compat_expanduser(s)) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
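    # Illustrative examples (added; not in the original source) for the
    # sanitizing helpers defined above:
    #   >>> sanitize_filename('AC/DC: Back', restricted=True)
    #   'AC_DC_-_Back'
    #   >>> orderedSet([1, 2, 1, 3])
    #   [1, 2, 3]
    #   >>> _htmlentity_transform('amp;')
    #   '&'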
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class YoutubeDLError(Exception): """Base exception for YoutubeDL errors.""" pass class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class GeoRestrictedError(ExtractorError): """Geographic restriction Error exception. This exception may be thrown when a video is not available from your geographic location due to geographic restrictions imposed by a website. """ def __init__(self, msg, countries=None): super(GeoRestrictedError, self).__init__(msg, expected=True) self.msg = msg self.countries = countries class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): super(PostProcessingError, self).__init__(msg) self.msg = msg class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(YoutubeDLError): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): super(ContentTooShortError, self).__init__( 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) ) # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(YoutubeDLError): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
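        # Illustrative example (added; not in the original source) of the
        # percent-encoding the commented-out workaround below would apply to
        # a non-ASCII cookie value:
        #   >>> compat_urllib_parse.quote('id=café', b"%/;:@&=+$,!~*'()?#[] ")
        #   'id=caf%C3%A9'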
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = re.sub(r'[,|]', '', date_str) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) # Remove unrecognized timezones from ISO 8601 alike timestamps m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) if m: date_str = date_str[:-len(m.group('tz'))] for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def 
subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
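        # Illustrative examples (added; not in the original source) for the
        # date helpers defined above:
        #   >>> hyphenate_date('20140204')
        #   '2014-02-04'
        #   >>> date_from_str('now-1day') == datetime.date.today() - datetime.timedelta(days=1)
        #   True
        #   >>> '20140204' in DateRange('20140101', '20141231')
        #   True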
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(compat_shlex_quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if isinstance(base, bytes): base = base.decode('utf-8') if not isinstance(base, compat_str) or not re.match( r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? 
(?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
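            # Illustrative examples (added; not in the original source) for
            # the duration and filename helpers defined above:
            #   >>> parse_duration('9:12:43')
            #   33163
            #   >>> parse_duration('3 min')
            #   180
            #   >>> prepend_extension('abc.ext', 'temp')
            #   'abc.temp.ext'
            #   >>> replace_extension('abc.ext', 'temp')
            #   'abc.temp'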
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def _multipart_encode_impl(data, boundary): content_type = 'multipart/form-data; boundary=%s' % boundary out = b'' for k, v in data.items(): out += b'--' + boundary.encode('ascii') + b'\r\n' if isinstance(k, compat_str): k = k.encode('utf-8') if isinstance(v, compat_str): v = v.encode('utf-8') # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578 # suggests sending 
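        # Illustrative examples (added; not in the original source) for the
        # URL and dict helpers defined above:
        #   >>> urljoin('http://foo.de/', '/a')
        #   'http://foo.de/a'
        #   >>> update_url_query('http://example.com/path', {'quality': 'HD'})
        #   'http://example.com/path?quality=HD'
        #   >>> dict_get({'a': None, 'b': 2}, ('a', 'b'))
        #   2
        #   >>> try_get({'a': [1]}, lambda x: x['a'][0], int)
        #   1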
UTF-8 directly. Firefox sends UTF-8, too content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n' if boundary.encode('ascii') in content: raise ValueError('Boundary overlaps with data') out += content out += b'--' + boundary.encode('ascii') + b'--\r\n' return out, content_type def multipart_encode(data, boundary=None): ''' Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578 ''' has_specified_boundary = boundary is not None while True: if boundary is None: boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff)) try: out, content_type = _multipart_encode_impl(data, boundary) break except ValueError: if has_specified_boundary: raise boundary = None return out, content_type def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): if not isinstance(getter, (list, tuple)): getter = [getter] for get in getter: try: v = get(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'''(?sx)^ (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+) (?:\s*&&\s*(?P=func_name))? \s*\(\s*(?P<callback_data>.*)\);? 
\s*?(?://[^\n]*)*$''', r'\g<callback_data>', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.split(';')[0].strip().lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', 'quicktime': 'mov', 'mp2t': 'ts', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} splited_codecs = list(filter(None, map( lambda str: str.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in splited_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr) if not vcodec and not acodec: if len(splited_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(splited_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
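    Recognizes UTF-8, UTF-16 and UTF-32 BOMs and decodes accordingly before
    looking for a leading '<'.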
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): LEGACY_NAMESPACES = ( ('http://www.w3.org/ns/ttml', [ 'http://www.w3.org/2004/11/ttaf1', 'http://www.w3.org/2006/04/ttaf1', 'http://www.w3.org/2006/10/ttaf1', ]), ('http://www.w3.org/ns/ttml#styling', [ 'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser(object): _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while 
True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 
'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 
'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class GeoUtils(object): # Major IPv4 address blocks per country _country_ip_map = { 'AD': '85.94.160.0/19', 'AE': '94.200.0.0/13', 'AF': '149.54.0.0/17', 'AG': '209.59.64.0/18', 'AI': '204.14.248.0/21', 'AL': '46.99.0.0/16', 'AM': '46.70.0.0/15', 'AO': '105.168.0.0/13', 'AP': '159.117.192.0/21', 'AR': '181.0.0.0/12', 'AS': '202.70.112.0/20', 'AT': 
'84.112.0.0/13', 'AU': '1.128.0.0/11', 'AW': '181.41.0.0/18', 'AZ': '5.191.0.0/16', 'BA': '31.176.128.0/17', 'BB': '65.48.128.0/17', 'BD': '114.130.0.0/16', 'BE': '57.0.0.0/8', 'BF': '129.45.128.0/17', 'BG': '95.42.0.0/15', 'BH': '37.131.0.0/17', 'BI': '154.117.192.0/18', 'BJ': '137.255.0.0/16', 'BL': '192.131.134.0/24', 'BM': '196.12.64.0/18', 'BN': '156.31.0.0/16', 'BO': '161.56.0.0/16', 'BQ': '161.0.80.0/20', 'BR': '152.240.0.0/12', 'BS': '24.51.64.0/18', 'BT': '119.2.96.0/19', 'BW': '168.167.0.0/16', 'BY': '178.120.0.0/13', 'BZ': '179.42.192.0/18', 'CA': '99.224.0.0/11', 'CD': '41.243.0.0/16', 'CF': '196.32.200.0/21', 'CG': '197.214.128.0/17', 'CH': '85.0.0.0/13', 'CI': '154.232.0.0/14', 'CK': '202.65.32.0/19', 'CL': '152.172.0.0/14', 'CM': '165.210.0.0/15', 'CN': '36.128.0.0/10', 'CO': '181.240.0.0/12', 'CR': '201.192.0.0/12', 'CU': '152.206.0.0/15', 'CV': '165.90.96.0/19', 'CW': '190.88.128.0/17', 'CY': '46.198.0.0/15', 'CZ': '88.100.0.0/14', 'DE': '53.0.0.0/8', 'DJ': '197.241.0.0/17', 'DK': '87.48.0.0/12', 'DM': '192.243.48.0/20', 'DO': '152.166.0.0/15', 'DZ': '41.96.0.0/12', 'EC': '186.68.0.0/15', 'EE': '90.190.0.0/15', 'EG': '156.160.0.0/11', 'ER': '196.200.96.0/20', 'ES': '88.0.0.0/11', 'ET': '196.188.0.0/14', 'EU': '2.16.0.0/13', 'FI': '91.152.0.0/13', 'FJ': '144.120.0.0/16', 'FM': '119.252.112.0/20', 'FO': '88.85.32.0/19', 'FR': '90.0.0.0/9', 'GA': '41.158.0.0/15', 'GB': '25.0.0.0/8', 'GD': '74.122.88.0/21', 'GE': '31.146.0.0/16', 'GF': '161.22.64.0/18', 'GG': '62.68.160.0/19', 'GH': '45.208.0.0/14', 'GI': '85.115.128.0/19', 'GL': '88.83.0.0/19', 'GM': '160.182.0.0/15', 'GN': '197.149.192.0/18', 'GP': '104.250.0.0/19', 'GQ': '105.235.224.0/20', 'GR': '94.64.0.0/13', 'GT': '168.234.0.0/16', 'GU': '168.123.0.0/16', 'GW': '197.214.80.0/20', 'GY': '181.41.64.0/18', 'HK': '113.252.0.0/14', 'HN': '181.210.0.0/16', 'HR': '93.136.0.0/13', 'HT': '148.102.128.0/17', 'HU': '84.0.0.0/14', 'ID': '39.192.0.0/10', 'IE': '87.32.0.0/12', 'IL': '79.176.0.0/13', 'IM': '5.62.80.0/20', 'IN': '117.192.0.0/10', 'IO': '203.83.48.0/21', 'IQ': '37.236.0.0/14', 'IR': '2.176.0.0/12', 'IS': '82.221.0.0/16', 'IT': '79.0.0.0/10', 'JE': '87.244.64.0/18', 'JM': '72.27.0.0/17', 'JO': '176.29.0.0/16', 'JP': '126.0.0.0/8', 'KE': '105.48.0.0/12', 'KG': '158.181.128.0/17', 'KH': '36.37.128.0/17', 'KI': '103.25.140.0/22', 'KM': '197.255.224.0/20', 'KN': '198.32.32.0/19', 'KP': '175.45.176.0/22', 'KR': '175.192.0.0/10', 'KW': '37.36.0.0/14', 'KY': '64.96.0.0/15', 'KZ': '2.72.0.0/13', 'LA': '115.84.64.0/18', 'LB': '178.135.0.0/16', 'LC': '192.147.231.0/24', 'LI': '82.117.0.0/19', 'LK': '112.134.0.0/15', 'LR': '41.86.0.0/19', 'LS': '129.232.0.0/17', 'LT': '78.56.0.0/13', 'LU': '188.42.0.0/16', 'LV': '46.109.0.0/16', 'LY': '41.252.0.0/14', 'MA': '105.128.0.0/11', 'MC': '88.209.64.0/18', 'MD': '37.246.0.0/16', 'ME': '178.175.0.0/17', 'MF': '74.112.232.0/21', 'MG': '154.126.0.0/17', 'MH': '117.103.88.0/21', 'MK': '77.28.0.0/15', 'ML': '154.118.128.0/18', 'MM': '37.111.0.0/17', 'MN': '49.0.128.0/17', 'MO': '60.246.0.0/16', 'MP': '202.88.64.0/20', 'MQ': '109.203.224.0/19', 'MR': '41.188.64.0/18', 'MS': '208.90.112.0/22', 'MT': '46.11.0.0/16', 'MU': '105.16.0.0/12', 'MV': '27.114.128.0/18', 'MW': '105.234.0.0/16', 'MX': '187.192.0.0/11', 'MY': '175.136.0.0/13', 'MZ': '197.218.0.0/15', 'NA': '41.182.0.0/16', 'NC': '101.101.0.0/18', 'NE': '197.214.0.0/18', 'NF': '203.17.240.0/22', 'NG': '105.112.0.0/12', 'NI': '186.76.0.0/15', 'NL': '145.96.0.0/11', 'NO': '84.208.0.0/13', 'NP': '36.252.0.0/15', 'NR': '203.98.224.0/19', 'NU': 
'49.156.48.0/22', 'NZ': '49.224.0.0/14', 'OM': '5.36.0.0/15', 'PA': '186.72.0.0/15', 'PE': '186.160.0.0/14', 'PF': '123.50.64.0/18', 'PG': '124.240.192.0/19', 'PH': '49.144.0.0/13', 'PK': '39.32.0.0/11', 'PL': '83.0.0.0/11', 'PM': '70.36.0.0/20', 'PR': '66.50.0.0/16', 'PS': '188.161.0.0/16', 'PT': '85.240.0.0/13', 'PW': '202.124.224.0/20', 'PY': '181.120.0.0/14', 'QA': '37.210.0.0/15', 'RE': '139.26.0.0/16', 'RO': '79.112.0.0/13', 'RS': '178.220.0.0/14', 'RU': '5.136.0.0/13', 'RW': '105.178.0.0/15', 'SA': '188.48.0.0/13', 'SB': '202.1.160.0/19', 'SC': '154.192.0.0/11', 'SD': '154.96.0.0/13', 'SE': '78.64.0.0/12', 'SG': '152.56.0.0/14', 'SI': '188.196.0.0/14', 'SK': '78.98.0.0/15', 'SL': '197.215.0.0/17', 'SM': '89.186.32.0/19', 'SN': '41.82.0.0/15', 'SO': '197.220.64.0/19', 'SR': '186.179.128.0/17', 'SS': '105.235.208.0/21', 'ST': '197.159.160.0/19', 'SV': '168.243.0.0/16', 'SX': '190.102.0.0/20', 'SY': '5.0.0.0/16', 'SZ': '41.84.224.0/19', 'TC': '65.255.48.0/20', 'TD': '154.68.128.0/19', 'TG': '196.168.0.0/14', 'TH': '171.96.0.0/13', 'TJ': '85.9.128.0/18', 'TK': '27.96.24.0/21', 'TL': '180.189.160.0/20', 'TM': '95.85.96.0/19', 'TN': '197.0.0.0/11', 'TO': '175.176.144.0/21', 'TR': '78.160.0.0/11', 'TT': '186.44.0.0/15', 'TV': '202.2.96.0/19', 'TW': '120.96.0.0/11', 'TZ': '156.156.0.0/14', 'UA': '93.72.0.0/13', 'UG': '154.224.0.0/13', 'US': '3.0.0.0/8', 'UY': '167.56.0.0/13', 'UZ': '82.215.64.0/18', 'VA': '212.77.0.0/19', 'VC': '24.92.144.0/20', 'VE': '186.88.0.0/13', 'VG': '172.103.64.0/18', 'VI': '146.226.0.0/16', 'VN': '14.160.0.0/11', 'VU': '202.80.32.0/20', 'WF': '117.20.32.0/21', 'WS': '202.4.32.0/19', 'YE': '134.35.0.0/16', 'YT': '41.242.116.0/22', 'ZA': '41.0.0.0/11', 'ZM': '165.56.0.0/13', 'ZW': '41.85.192.0/19', } @classmethod def random_ipv4(cls, code): block = cls._country_ip_map.get(code.upper()) if not block: return None addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is # released into Public Domain # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. 
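    For example (illustrative): long_to_bytes(65537, 4) == b'\x00\x01\x00\x01'.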
""" # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = compat_struct_pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0 length = len(s) if length % 4: extra = (4 - length % 4) s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] return acc def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height 
= unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'xattr' module, " "or the 'xattr' binary.") def random_birthday(year_field, month_field, day_field): return { year_field: str(random.randint(1950, 1995)), month_field: str(random.randint(1, 12)), day_field: str(random.randint(1, 31)), }
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-20-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-20-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-2
# coding: utf-8 from __future__ import unicode_literals import base64 import datetime import hashlib import json import netrc import os import random import re import socket import sys import time import math from ..compat import ( compat_cookiejar, compat_cookies, compat_etree_fromstring, compat_getpass, compat_http_client, compat_os_name, compat_str, compat_urllib_error, compat_urllib_parse_unquote, compat_urllib_parse_urlencode, compat_urllib_request, compat_urlparse, compat_xml_parse_error, ) from ..downloader.f4m import ( get_base_url, remove_encrypted_media, ) from ..utils import ( NO_DEFAULT, age_restricted, base_url, bug_reports_message, clean_html, compiled_regex_type, determine_ext, determine_protocol, error_to_compat_str, ExtractorError, extract_attributes, fix_xml_ampersands, float_or_none, GeoRestrictedError, GeoUtils, int_or_none, js_to_json, mimetype2ext, orderedSet, parse_codecs, parse_duration, parse_iso8601, parse_m3u8_attributes, RegexNotFoundError, sanitized_Request, sanitize_filename, unescapeHTML, unified_strdate, unified_timestamp, update_Request, update_url_query, urljoin, url_basename, xpath_element, xpath_text, xpath_with_ns, ) class InfoExtractor(object): """Information Extractor class. Information extractors are the classes that, given a URL, extract information about the video (or videos) the URL refers to. This information includes the real video URL, the video title, author and others. The information is stored in a dictionary which is then passed to the YoutubeDL. The YoutubeDL processes this information possibly downloading the video to the file system, among other possible outcomes. The type field determines the type of the result. By far the most common value (and the default if _type is missing) is "video", which indicates a single video. For a video, the dictionaries must include the following fields: id: Video identifier. title: Video title, unescaped. Additionally, it must contain either a formats entry or a url one: formats: A list of dictionaries for each format available, ordered from worst to best quality. Potential fields: * url Mandatory. The URL of the video file * manifest_url The URL of the manifest file in case of fragmented media (DASH, hls, hds) * ext Will be calculated from URL if missing * format A human-readable description of the format ("mp4 container with h264/opus"). Calculated from the format_id, width, height. and format_note fields if missing. * format_id A short description of the format ("mp4_h264_opus" or "19"). Technically optional, but strongly recommended. * format_note Additional info about the format ("3D" or "DASH video") * width Width of the video, if known * height Height of the video, if known * resolution Textual description of width and height * tbr Average bitrate of audio and video in KBit/s * abr Average audio bitrate in KBit/s * acodec Name of the audio codec in use * asr Audio sampling rate in Hertz * vbr Average video bitrate in KBit/s * fps Frame rate * vcodec Name of the video codec in use * container Name of the container format * filesize The number of bytes, if known in advance * filesize_approx An estimate for the number of bytes * player_url SWF Player URL (used for rtmpdump). * protocol The protocol that will be used for the actual download, lower-case. "http", "https", "rtsp", "rtmp", "rtmpe", "m3u8", "m3u8_native" or "http_dash_segments". * fragment_base_url Base URL for fragments. Each fragment's path value (if present) will be relative to this URL. 
* fragments A list of fragments of a fragmented media. Each fragment entry must contain either an url or a path. If an url is present it should be considered by a client. Otherwise both path and fragment_base_url must be present. Here is the list of all potential fields: * "url" - fragment's URL * "path" - fragment's path relative to fragment_base_url * "duration" (optional, int or float) * "filesize" (optional, int) * preference Order number of this format. If this field is present and not None, the formats get sorted by this field, regardless of all other values. -1 for default (order by other properties), -2 or smaller for less than default. < -1000 to hide the format (if there is another one which is strictly better) * language Language code, e.g. "de" or "en-US". * language_preference Is this in the language mentioned in the URL? 10 if it's what the URL is about, -1 for default (don't know), -10 otherwise, other values reserved for now. * quality Order number of the video quality of this format, irrespective of the file format. -1 for default (order by other properties), -2 or smaller for less than default. * source_preference Order number for this video source (quality takes higher priority) -1 for default (order by other properties), -2 or smaller for less than default. * http_headers A dictionary of additional HTTP headers to add to the request. * stretched_ratio If given and not 1, indicates that the video's pixels are not square. width : height ratio as float. * no_resume The server does not support resuming the (HTTP or RTMP) download. Boolean. url: Final video URL. ext: Video filename extension. format: The video format, defaults to ext (used for --get-format) player_url: SWF Player URL (used for rtmpdump). The following fields are optional: alt_title: A secondary title of the video. display_id An alternative identifier for the video, not necessarily unique, but available before title. Typically, id is something like "4234987", title "Dancing naked mole rats", and display_id "dancing-naked-mole-rats" thumbnails: A list of dictionaries, with the following entries: * "id" (optional, string) - Thumbnail format ID * "url" * "preference" (optional, int) - quality of the image * "width" (optional, int) * "height" (optional, int) * "resolution" (optional, string "{width}x{height"}, deprecated) * "filesize" (optional, int) thumbnail: Full URL to a video thumbnail image. description: Full video description. uploader: Full name of the video uploader. license: License name the video is licensed under. creator: The creator of the video. release_date: The date (YYYYMMDD) when the video was released. timestamp: UNIX timestamp of the moment the video became available. upload_date: Video upload date (YYYYMMDD). If not explicitly set, calculated from timestamp. uploader_id: Nickname or id of the video uploader. uploader_url: Full URL to a personal webpage of the video uploader. location: Physical location where the video was filmed. subtitles: The available subtitles as a dictionary in the format {tag: subformats}. "tag" is usually a language code, and "subformats" is a list sorted from lower to higher preference, each element is a dictionary with the "ext" entry and one of: * "data": The subtitles file contents * "url": A URL pointing to the subtitles file "ext" will be calculated from URL if missing automatic_captions: Like 'subtitles', used by the YoutubeIE for automatically generated captions duration: Length of the video in seconds, as an integer or float. 
view_count: How many users have watched the video on the platform. like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video repost_count: Number of reposts of the video average_rating: Average rating give by users, the scale used depends on the webpage comment_count: Number of comments on the video comments: A list of comments, each with one or more of the following properties (all but one of text or html optional): * "author" - human-readable name of the comment author * "author_id" - user ID of the comment author * "id" - Comment ID * "html" - Comment as HTML * "text" - Plain text of the comment * "timestamp" - UNIX timestamp of comment * "parent" - ID of the comment this one is replying to. Set to "root" to indicate that this is a comment to the original video. age_limit: Age restriction for the video, as an integer (years) webpage_url: The URL to the video webpage, if given to youtube-dl it should allow to get the same result again. (It will be set by YoutubeDL if it's missing) categories: A list of categories that the video falls in, for example ["Sports", "Berlin"] tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"] is_live: True, False, or None (=unknown). Whether this video is a live stream that goes on instead of a fixed-length video. start_time: Time in seconds where the reproduction should start, as specified in the URL. end_time: Time in seconds where the reproduction should end, as specified in the URL. chapters: A list of dictionaries, with the following entries: * "start_time" - The start time of the chapter in seconds * "end_time" - The end time of the chapter in seconds * "title" (optional, string) The following fields should only be used when the video belongs to some logical chapter or section: chapter: Name or title of the chapter the video belongs to. chapter_number: Number of the chapter the video belongs to, as an integer. chapter_id: Id of the chapter the video belongs to, as a unicode string. The following fields should only be used when the video is an episode of some series, programme or podcast: series: Title of the series or programme the video episode belongs to. season: Title of the season the video episode belongs to. season_number: Number of the season the video episode belongs to, as an integer. season_id: Id of the season the video episode belongs to, as a unicode string. episode: Title of the video episode. Unlike mandatory video title field, this field should denote the exact title of the video episode without any kind of decoration. episode_number: Number of the video episode within a season, as an integer. episode_id: Id of the video episode, as a unicode string. The following fields should only be used when the media is a track or a part of a music album: track: Title of the track. track_number: Number of the track within an album or a disc, as an integer. track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii), as a unicode string. artist: Artist(s) of the track. genre: Genre(s) of the track. album: Title of the album the track belongs to. album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc). album_artist: List of all artists appeared on the album (e.g. "Ash Borer / Fell Voices" or "Various Artists", useful for splits and compilations). disc_number: Number of the disc or other physical medium the track belongs to, as an integer. release_year: Year (YYYY) when the album was released. 
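    For instance, a minimal single-video result could look like this (purely
    illustrative values; the URL is hypothetical):

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'url': 'https://example.com/video.mp4',
            'ext': 'mp4',
        }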
Unless mentioned otherwise, the fields should be Unicode strings. Unless mentioned otherwise, None is equivalent to absence of information. _type "playlist" indicates multiple videos. There must be a key "entries", which is a list, an iterable, or a PagedList object, each element of which is a valid dictionary by this specification. Additionally, playlists can have "id", "title", "description", "uploader", "uploader_id", "uploader_url" attributes with the same semantics as videos (see above). _type "multi_video" indicates that there are multiple videos that form a single show, for examples multiple acts of an opera or TV episode. It must have an entries key like a playlist and contain all the keys required for a video at the same time. _type "url" indicates that the video must be extracted from another location, possibly by a different extractor. Its only required key is: "url" - the next URL to extract. The key "ie_key" can be set to the class name (minus the trailing "IE", e.g. "Youtube") if the extractor class is known in advance. Additionally, the dictionary may have any properties of the resolved entity known in advance, for example "title" if the title of the referred video is known ahead of time. _type "url_transparent" entities have the same specification as "url", but indicate that the given additional information is more precise than the one associated with the resolved URL. This is useful when a site employs a video service that hosts the video and its technical metadata, but that video service does not embed a useful title, description etc. Subclasses of this one should re-define the _real_initialize() and _real_extract() methods and define a _VALID_URL regexp. Probably, they should also be added to the list of extractors. _GEO_BYPASS attribute may be set to False in order to disable geo restriction bypass mechanisms for a particular extractor. Though it won't disable explicit geo restriction bypass based on country code provided with geo_bypass_country. (experimental) _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted countries for this extractor. One of these countries will be used by geo restriction bypass mechanism right away in order to bypass geo restriction, of course, if the mechanism is not disabled. (experimental) NB: both these geo attributes are experimental and may change in future or be completely removed. Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. """ _ready = False _downloader = None _x_forwarded_for_ip = None _GEO_BYPASS = True _GEO_COUNTRIES = None _WORKING = True def __init__(self, downloader=None): """Constructor. 
Receives an optional downloader.""" self._ready = False self._x_forwarded_for_ip = None self.set_downloader(downloader) @classmethod def suitable(cls, url): """Receives a URL and returns True if suitable for this IE.""" # This does not use has/getattr intentionally - we want to know whether # we have cached the regexp for *this* class, whereas getattr would also # match the superclass if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) return cls._VALID_URL_RE.match(url) is not None @classmethod def _match_id(cls, url): if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) m = cls._VALID_URL_RE.match(url) assert m return compat_str(m.group('id')) @classmethod def working(cls): """Getter method for _WORKING.""" return cls._WORKING def initialize(self): """Initializes an instance (authentication, etc).""" self._initialize_geo_bypass(self._GEO_COUNTRIES) if not self._ready: self._real_initialize() self._ready = True def _initialize_geo_bypass(self, countries): """ Initialize geo restriction bypass mechanism. This method is used to initialize geo bypass mechanism based on faking X-Forwarded-For HTTP header. A random country from provided country list is selected and a random IP belonging to this country is generated. This IP will be passed as X-Forwarded-For HTTP header in all subsequent HTTP requests. This method will be used for initial geo bypass mechanism initialization during the instance initialization with _GEO_COUNTRIES. You may also manually call it from extractor's code if geo countries information is not available beforehand (e.g. obtained during extraction) or due to some another reason. """ if not self._x_forwarded_for_ip: country_code = self._downloader.params.get('geo_bypass_country', None) # If there is no explicit country for geo bypass specified and # the extractor is known to be geo restricted let's fake IP # as X-Forwarded-For right away. if (not country_code and self._GEO_BYPASS and self._downloader.params.get('geo_bypass', True) and countries): country_code = random.choice(countries) if country_code: self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if self._downloader.params.get('verbose', False): self._downloader.to_screen( '[debug] Using fake IP %s (%s) as X-Forwarded-For.' % (self._x_forwarded_for_ip, country_code.upper())) def extract(self, url): """Extracts URL information and returns it in list of dicts.""" try: for _ in range(2): try: self.initialize() ie_result = self._real_extract(url) if self._x_forwarded_for_ip: ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip return ie_result except GeoRestrictedError as e: if self.__maybe_fake_ip_and_retry(e.countries): continue raise except ExtractorError: raise except compat_http_client.IncompleteRead as e: raise ExtractorError('A network error has occurred.', cause=e, expected=True) except (KeyError, StopIteration) as e: raise ExtractorError('An extractor error has occurred.', cause=e) def __maybe_fake_ip_and_retry(self, countries): if (not self._downloader.params.get('geo_bypass_country', None) and self._GEO_BYPASS and self._downloader.params.get('geo_bypass', True) and not self._x_forwarded_for_ip and countries): country_code = random.choice(countries) self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if self._x_forwarded_for_ip: self.report_warning( 'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.' 
% (self._x_forwarded_for_ip, country_code.upper())) return True return False def set_downloader(self, downloader): """Sets the downloader for this IE.""" self._downloader = downloader def _real_initialize(self): """Real initialization process. Redefine in subclasses.""" pass def _real_extract(self, url): """Real extraction process. Redefine in subclasses.""" pass @classmethod def ie_key(cls): """A string for getting the InfoExtractor with get_info_extractor""" return compat_str(cls.__name__[:-2]) @property def IE_NAME(self): return compat_str(type(self).__name__[:-2]) def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}): """ Returns the response handle """ if note is None: self.report_download_webpage(video_id) elif note is not False: if video_id is None: self.to_screen('%s' % (note,)) else: self.to_screen('%s: %s' % (video_id, note)) # Some sites check X-Forwarded-For HTTP header in order to figure out # the origin of the client behind proxy. This allows bypassing geo # restriction by faking this header's value to IP that belongs to some # geo unrestricted country. We will do so once we encounter any # geo restriction error. if self._x_forwarded_for_ip: if 'X-Forwarded-For' not in headers: headers['X-Forwarded-For'] = self._x_forwarded_for_ip if isinstance(url_or_request, compat_urllib_request.Request): url_or_request = update_Request( url_or_request, data=data, headers=headers, query=query) else: if query: url_or_request = update_url_query(url_or_request, query) if data is not None or headers: url_or_request = sanitized_Request(url_or_request, data, headers) try: return self._downloader.urlopen(url_or_request) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: if errnote is False: return False if errnote is None: errnote = 'Unable to download webpage' errmsg = '%s: %s' % (errnote, error_to_compat_str(err)) if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: self._downloader.report_warning(errmsg) return False def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}): """ Returns a tuple (page content as string, URL handle) """ # Strip hashes from the URL (#1038) if isinstance(url_or_request, (compat_str, str)): url_or_request = url_or_request.partition('#')[0] urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query) if urlh is False: assert not fatal return False content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding) return (content, urlh) @staticmethod def _guess_encoding_from_content(content_type, webpage_bytes): m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type) if m: encoding = m.group(1) else: m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]', webpage_bytes[:1024]) if m: encoding = m.group(1).decode('ascii') elif webpage_bytes.startswith(b'\xff\xfe'): encoding = 'utf-16' else: encoding = 'utf-8' return encoding def __check_blocked(self, content): first_block = content[:512] if ('<title>Access to this site is blocked</title>' in content and 'Websense' in first_block): msg = 'Access to this webpage has been blocked by Websense filtering software in your network.' 
blocked_iframe = self._html_search_regex( r'<iframe src="([^"]+)"', content, 'Websense information URL', default=None) if blocked_iframe: msg += ' Visit %s for more details' % blocked_iframe raise ExtractorError(msg, expected=True) if '<title>The URL you requested has been blocked</title>' in first_block: msg = ( 'Access to this webpage has been blocked by Indian censorship. ' 'Use a VPN or proxy server (with --proxy) to route around it.') block_msg = self._html_search_regex( r'</h1><p>(.*?)</p>', content, 'block message', default=None) if block_msg: msg += ' (Message: "%s")' % block_msg.replace('\n', ' ') raise ExtractorError(msg, expected=True) if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and 'blocklist.rkn.gov.ru' in content): raise ExtractorError( 'Access to this webpage has been blocked by decision of the Russian government. ' 'Visit http://blocklist.rkn.gov.ru/ for a block reason.', expected=True) def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None): content_type = urlh.headers.get('Content-Type', '') webpage_bytes = urlh.read() if prefix is not None: webpage_bytes = prefix + webpage_bytes if not encoding: encoding = self._guess_encoding_from_content(content_type, webpage_bytes) if self._downloader.params.get('dump_intermediate_pages', False): self.to_screen('Dumping request to ' + urlh.geturl()) dump = base64.b64encode(webpage_bytes).decode('ascii') self._downloader.to_screen(dump) if self._downloader.params.get('write_pages', False): basen = '%s_%s' % (video_id, urlh.geturl()) if len(basen) > 240: h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest() basen = basen[:240 - len(h)] + h raw_filename = basen + '.dump' filename = sanitize_filename(raw_filename, restricted=True) self.to_screen('Saving request to ' + filename) # Working around MAX_PATH limitation on Windows (see # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) if compat_os_name == 'nt': absfilepath = os.path.abspath(filename) if len(absfilepath) > 259: filename = '\\\\?\\' + absfilepath with open(filename, 'wb') as outf: outf.write(webpage_bytes) try: content = webpage_bytes.decode(encoding, 'replace') except LookupError: content = webpage_bytes.decode('utf-8', 'replace') self.__check_blocked(content) return content def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}): """ Returns the data of the page as a string """ success = False try_count = 0 while success is False: try: res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query) success = True except compat_http_client.IncompleteRead as e: try_count += 1 if try_count >= tries: raise e self._sleep(timeout, video_id) if res is False: return res else: content, _ = res return content def _download_xml(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}): """Return the xml as an xml.etree.ElementTree.Element""" xml_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query) if xml_string is False: return xml_string return self._parse_xml( xml_string, video_id, transform_source=transform_source, fatal=fatal) def _parse_xml(self, xml_string, video_id, 
transform_source=None, fatal=True): if transform_source: xml_string = transform_source(xml_string) try: return compat_etree_fromstring(xml_string.encode('utf-8')) except compat_xml_parse_error as ve: errmsg = '%s: Failed to parse XML ' % video_id if fatal: raise ExtractorError(errmsg, cause=ve) else: self.report_warning(errmsg + str(ve)) def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}): json_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query) if (not fatal) and json_string is False: return None return self._parse_json( json_string, video_id, transform_source=transform_source, fatal=fatal) def _parse_json(self, json_string, video_id, transform_source=None, fatal=True): if transform_source: json_string = transform_source(json_string) try: return json.loads(json_string) except ValueError as ve: errmsg = '%s: Failed to parse JSON ' % video_id if fatal: raise ExtractorError(errmsg, cause=ve) else: self.report_warning(errmsg + str(ve)) def report_warning(self, msg, video_id=None): idstr = '' if video_id is None else '%s: ' % video_id self._downloader.report_warning( '[%s] %s%s' % (self.IE_NAME, idstr, msg)) def to_screen(self, msg): """Print msg to screen, prefixing it with '[ie_name]'""" self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg)) def report_extraction(self, id_or_name): """Report information extraction.""" self.to_screen('%s: Extracting information' % id_or_name) def report_download_webpage(self, video_id): """Report webpage download.""" self.to_screen('%s: Downloading webpage' % video_id) def report_age_confirmation(self): """Report attempt to confirm age.""" self.to_screen('Confirming age') def report_login(self): """Report attempt to log in.""" self.to_screen('Logging in') @staticmethod def raise_login_required(msg='This video is only available for registered users'): raise ExtractorError( '%s. Use --username and --password or --netrc to provide account credentials.' 
            % msg, expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
        raise GeoRestrictedError(msg, countries=countries)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info

    def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
        urls = orderedSet(
            self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
            for m in matches)
        return self.playlist_result(
            urls, playlist_id=playlist_id, playlist_title=playlist_title)

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value, report a warning, or raise a
        RegexNotFoundError, depending on default and fatal, specifying the
        field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_netrc_login_info(self, netrc_machine=None):
        username = None
        password = None
        netrc_machine = netrc_machine or self._NETRC_MACHINE

        if self._downloader.params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(netrc_machine)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError(
                        'No authenticators for %s' % netrc_machine)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(
                    'parsing .netrc: %s' % error_to_compat_str(err))

        return username, password

    def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
        """
        Get the login info as (username, password)
        First look for the manually specified credentials using username_option
        and password_option as keys in params dictionary.
        If no such credentials are available, look in the netrc file using the
        netrc_machine or _NETRC_MACHINE value.
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get(username_option) is not None:
            username = downloader_params[username_option]
            password = downloader_params[password_option]
        else:
            username, password = self._get_netrc_login_info(netrc_machine)

        return username, password

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verification;
        currently this just uses the command line option.
        If there's no info available, return None
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor') is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if not isinstance(prop, (list, tuple)):
            prop = [prop]
        if name is None:
            name = 'OpenGraph %s' % prop[0]
        og_regexes = []
        for p in prop:
            og_regexes.extend(self._og_regexes(p))
        escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if not isinstance(name, (list, tuple)):
            name = [name]
        if display_name is None:
            display_name = name[0]
        return self._html_search_regex(
            [self._meta_regex(n) for n in name],
            html, display_name, fatal=fatal, group='content', **kwargs)

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return
None RATING_TABLE = { 'safe for kids': 0, 'general': 8, '14 years': 14, 'mature': 17, 'restricted': 19, } return RATING_TABLE.get(rating.lower()) def _family_friendly_search(self, html): # See http://schema.org/VideoObject family_friendly = self._html_search_meta( 'isFamilyFriendly', html, default=None) if not family_friendly: return None RATING_TABLE = { '1': 0, 'true': 0, '0': 18, 'false': 18, } return RATING_TABLE.get(family_friendly.lower()) def _twitter_search_player(self, html): return self._html_search_meta('twitter:player', html, 'twitter card player') def _search_json_ld(self, html, video_id, expected_type=None, **kwargs): json_ld = self._search_regex( r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>', html, 'JSON-LD', group='json_ld', **kwargs) default = kwargs.get('default', NO_DEFAULT) if not json_ld: return default if default is not NO_DEFAULT else {} # JSON-LD may be malformed and thus `fatal` should be respected. # At the same time `default` may be passed that assumes `fatal=False` # for _search_regex. Let's simulate the same behavior here as well. fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type) def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None): if isinstance(json_ld, compat_str): json_ld = self._parse_json(json_ld, video_id, fatal=fatal) if not json_ld: return {} info = {} if not isinstance(json_ld, (list, tuple, dict)): return info if isinstance(json_ld, dict): json_ld = [json_ld] def extract_video_object(e): assert e['@type'] == 'VideoObject' info.update({ 'url': e.get('contentUrl'), 'title': unescapeHTML(e.get('name')), 'description': unescapeHTML(e.get('description')), 'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'), 'duration': parse_duration(e.get('duration')), 'timestamp': unified_timestamp(e.get('uploadDate')), 'filesize': float_or_none(e.get('contentSize')), 'tbr': int_or_none(e.get('bitrate')), 'width': int_or_none(e.get('width')), 'height': int_or_none(e.get('height')), 'view_count': int_or_none(e.get('interactionCount')), }) for e in json_ld: if e.get('@context') == 'http://schema.org': item_type = e.get('@type') if expected_type is not None and expected_type != item_type: return info if item_type in ('TVEpisode', 'Episode'): info.update({ 'episode': unescapeHTML(e.get('name')), 'episode_number': int_or_none(e.get('episodeNumber')), 'description': unescapeHTML(e.get('description')), }) part_of_season = e.get('partOfSeason') if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'): info['season_number'] = int_or_none(part_of_season.get('seasonNumber')) part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries') if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'): info['series'] = unescapeHTML(part_of_series.get('name')) elif item_type == 'Article': info.update({ 'timestamp': parse_iso8601(e.get('datePublished')), 'title': unescapeHTML(e.get('headline')), 'description': unescapeHTML(e.get('articleBody')), }) elif item_type == 'VideoObject': extract_video_object(e) continue video = e.get('video') if isinstance(video, dict) and video.get('@type') == 'VideoObject': extract_video_object(video) break return dict((k, v) for k, v in info.items() if v is not None) @staticmethod def _hidden_inputs(html): html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html) hidden_inputs = {} for 
input in re.findall(r'(?i)(<input[^>]+>)', html):
            attrs = extract_attributes(input)
            if not attrs:
                continue
            if attrs.get('type') not in ('hidden', 'submit'):
                continue
            name = attrs.get('name') or attrs.get('id')
            value = attrs.get('value')
            if name and value is not None:
                hidden_inputs[name] = value
        return hidden_inputs

    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(
                    f.get(field)
                    if f.get(field) is not None
                    else ('' if field == 'format_id' else -1)
                    for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            protocol = f.get('protocol') or determine_protocol(f)
            proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)

            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    @staticmethod
    def _remove_duplicate_formats(formats):
        format_urls = set()
        unique_formats = []
        for f in formats:
            if f['url'] not in format_urls:
                format_urls.add(f['url'])
                unique_formats.append(f)
        formats[:] = unique_formats

    def _is_valid_url(self, url, video_id, item='video', headers={}):
        url = self._proto_relative_url(url, scheme='http:')
        # For now, assume non HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)

        if manifest is False:
            return []

        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)

    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats

        manifest_base_url = get_base_url(manifest)

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)

        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'mime type', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'

        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources.
See section 11.4 and section 4 of F4M spec if bootstrap_info is None: media_url = None # @href is introduced in 2.0, see section 11.6 of F4M spec if manifest_version == '2.0': media_url = media_el.attrib.get('href') if media_url is None: media_url = media_el.attrib.get('url') if not media_url: continue manifest_url = ( media_url if media_url.startswith('http://') or media_url.startswith('https://') else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url)) # If media_url is itself a f4m manifest do the recursive extraction # since bitrates in parent manifest (this one) and media_url manifest # may differ leading to inability to resolve the format by requested # bitrate in f4m downloader ext = determine_ext(manifest_url) if ext == 'f4m': f4m_formats = self._extract_f4m_formats( manifest_url, video_id, preference=preference, f4m_id=f4m_id, transform_source=transform_source, fatal=fatal) # Sometimes stream-level manifest contains single media entry that # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player). # At the same time parent's media entry in set-level manifest may # contain it. We will copy it from parent in such cases. if len(f4m_formats) == 1: f = f4m_formats[0] f.update({ 'tbr': f.get('tbr') or tbr, 'width': f.get('width') or width, 'height': f.get('height') or height, 'format_id': f.get('format_id') if not tbr else format_id, 'vcodec': vcodec, }) formats.extend(f4m_formats) continue elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( manifest_url, video_id, 'mp4', preference=preference, m3u8_id=m3u8_id, fatal=fatal)) continue formats.append({ 'format_id': format_id, 'url': manifest_url, 'manifest_url': manifest_url, 'ext': 'flv' if bootstrap_info is not None else None, 'protocol': 'f4m', 'tbr': tbr, 'width': width, 'height': height, 'vcodec': vcodec, 'preference': preference, }) return formats def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None): return { 'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])), 'url': m3u8_url, 'ext': ext, 'protocol': 'm3u8', 'preference': preference - 100 if preference else -100, 'resolution': 'multiple', 'format_note': 'Quality selection URL', } def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None, entry_protocol='m3u8', preference=None, m3u8_id=None, note=None, errnote=None, fatal=True, live=False): res = self._download_webpage_handle( m3u8_url, video_id, note=note or 'Downloading m3u8 information', errnote=errnote or 'Failed to download m3u8 information', fatal=fatal) if res is False: return [] m3u8_doc, urlh = res m3u8_url = urlh.geturl() return self._parse_m3u8_formats( m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol, preference=preference, m3u8_id=m3u8_id, live=live) def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None, entry_protocol='m3u8', preference=None, m3u8_id=None, live=False): if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access return [] if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay return [] formats = [] format_url = lambda u: ( u if re.match(r'^https?://', u) else compat_urlparse.urljoin(m3u8_url, u)) # References: # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21 # 2. https://github.com/rg3/youtube-dl/issues/12211 # We should try extracting formats only from master playlists [1, 4.3.4], # i.e. playlists that describe available qualities. 
        # On the other hand media playlists [1, 4.3.3] should be returned as is
        # since they contain just the media without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playlist and vice versa.
        # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in master playlist thus we can
        # clearly detect media playlist with this criterion.
        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]

        groups = {}
        last_stream_inf = {}

        def extract_media(x_media_line):
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                format_id = []
                for v in (m3u8_id, group_id, name):
                    if v:
                        format_id.append(v)
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(media_url),
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                if media_type == 'AUDIO':
                    f['vcodec'] = 'none'
                formats.append(f)

        def build_stream_name():
            # Although the specification does not mention the NAME attribute for
            # the EXT-X-STREAM-INF tag, it still sometimes may be present (see [1]
            # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id

        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH')
                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                stream_name = build_stream_name()
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
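                # An illustrative example: assuming m3u8_id='hls', a non-live
                # variant with BANDWIDTH=1280000 and no NAME attribute would
                # typically end up with format_id 'hls-1280' (m3u8_id joined
                # with tbr).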
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                manifest_url = format_url(line.strip())
                f = {
                    'format_id': '-'.join(format_id),
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_stream_inf.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                codecs = parse_codecs(last_stream_inf.get('CODECS'))
                f.update(codecs)
                audio_group_id = last_stream_inf.get('AUDIO')
                # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                # references a rendition group MUST have a CODECS attribute.
                # However, this is not always respected, for example, [2]
                # contains an EXT-X-STREAM-INF tag which references an AUDIO
                # rendition group but does not have CODECS and, despite
                # referencing an audio group, it represents a complete
                # (with audio and video) format. So, for such cases we will
                # ignore references to rendition groups and treat them
                # as complete formats.
                if audio_group_id and codecs and f.get('vcodec') != 'none':
                    audio_group = groups.get(audio_group_id)
                    if audio_group and audio_group[0].get('URI'):
                        # TODO: update acodec for audio only formats with
                        # the same GROUP-ID
                        f['acodec'] = 'none'
                formats.append(f)
                last_stream_inf = {}
        return formats

    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)

    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
upload_date = unified_strdate(content) thumbnails = [{ 'id': image.get('type'), 'url': image.get('src'), 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')] return { 'id': video_id, 'title': title or video_id, 'description': description, 'upload_date': upload_date, 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, } def _parse_smil_namespace(self, smil): return self._search_regex( r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None) def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): base = smil_url for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): b = meta.get('base') or meta.get('httpBase') if b: base = b break formats = [] rtmp_count = 0 http_count = 0 m3u8_count = 0 srcs = [] media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace)) for medium in media: src = medium.get('src') if not src or src in srcs: continue srcs.append(src) bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000) filesize = int_or_none(medium.get('size') or medium.get('fileSize')) width = int_or_none(medium.get('width')) height = int_or_none(medium.get('height')) proto = medium.get('proto') ext = medium.get('ext') src_ext = determine_ext(src) streamer = medium.get('streamer') or base if proto == 'rtmp' or streamer.startswith('rtmp'): rtmp_count += 1 formats.append({ 'url': streamer, 'play_path': src, 'ext': 'flv', 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate), 'tbr': bitrate, 'filesize': filesize, 'width': width, 'height': height, }) if transform_rtmp_url: streamer, src = transform_rtmp_url(streamer, src) formats[-1].update({ 'url': streamer, 'play_path': src, }) continue src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src) src_url = src_url.strip() if proto == 'm3u8' or src_ext == 'm3u8': m3u8_formats = self._extract_m3u8_formats( src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False) if len(m3u8_formats) == 1: m3u8_count += 1 m3u8_formats[0].update({ 'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate), 'tbr': bitrate, 'width': width, 'height': height, }) formats.extend(m3u8_formats) continue if src_ext == 'f4m': f4m_url = src_url if not f4m_params: f4m_params = { 'hdcore': '3.2.0', 'plugin': 'flowplayer-3.2.0.1', } f4m_url += '&' if '?' in f4m_url else '?' 
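                # With the default params above the final URL would look
                # something like, e.g.,
                # '.../manifest.f4m?hdcore=3.2.0&plugin=flowplayer-3.2.0.1'
                # (parameter order may vary depending on urlencode).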
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                continue

            if src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if xspf is False:
            return []
        return self._parse_xspf(xspf, playlist_id)

    def _parse_xspf(self, playlist, playlist_id):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
        res = self._download_webpage_handle(
            mpd_url, video_id,
            note=note or 'Downloading MPD manifest',
            errnote=errnote or 'Failed to download MPD manifest',
            fatal=fatal)
        if res is False:
            return []
        mpd, urlh = res
        mpd_base_url = base_url(urlh.geturl())

        return self._parse_mpd_formats(
            compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
            formats_dict=formats_dict, mpd_url=mpd_url)

    def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
        """
        Parse formats from MPD manifest.
        References:
         1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2.
            https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
        """
        if mpd_doc.get('type') == 'dynamic':
            return []

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

        def _add_ns(path):
            return self._xpath_ns(path, namespace)

        def is_drm_protected(element):
            return element.find(_add_ns('ContentProtection')) is not None

        def extract_multisegment_info(element, ms_parent_info):
            ms_info = ms_parent_info.copy()

            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract what is
            # relevant for us.
            def extract_common(source):
                segment_timeline = source.find(_add_ns('SegmentTimeline'))
                if segment_timeline is not None:
                    s_e = segment_timeline.findall(_add_ns('S'))
                    if s_e:
                        ms_info['total_number'] = 0
                        ms_info['s'] = []
                        for s in s_e:
                            r = int(s.get('r', 0))
                            ms_info['total_number'] += 1 + r
                            ms_info['s'].append({
                                't': int(s.get('t', 0)),
                                # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                                'd': int(s.attrib['d']),
                                'r': r,
                            })
                start_number = source.get('startNumber')
                if start_number:
                    ms_info['start_number'] = int(start_number)
                timescale = source.get('timescale')
                if timescale:
                    ms_info['timescale'] = int(timescale)
                segment_duration = source.get('duration')
                if segment_duration:
                    ms_info['segment_duration'] = float(segment_duration)

            def extract_Initialization(source):
                initialization = source.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.attrib['sourceURL']

            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                extract_common(segment_list)
                extract_Initialization(segment_list)
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                if segment_urls_e:
                    ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
                    extract_common(segment_template)
                    media = segment_template.get('media')
                    if media:
                        ms_info['media'] = media
                    initialization = segment_template.get('initialization')
                    if initialization:
                        ms_info['initialization'] = initialization
                    else:
                        extract_Initialization(segment_template)
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats = []
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                if is_drm_protected(adaptation_set):
                    continue
                adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    if is_drm_protected(representation):
                        continue
                    representation_attrib = adaptation_set.attrib.copy()
                    representation_attrib.update(representation.attrib)
                    # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                    mime_type = representation_attrib['mimeType']
                    content_type = mime_type.split('/')[0]
                    if content_type == 'text':
                        # TODO implement WebVTT downloading
                        pass
                    elif content_type in ('video', 'audio'):
                        base_url = ''
                        for element in (representation, adaptation_set, period, mpd_doc):
                            base_url_e = element.find(_add_ns('BaseURL'))
                            if base_url_e is not None:
                                base_url = base_url_e.text + base_url
                                if re.match(r'^https?://', base_url):
                                    break
                        if mpd_base_url and not re.match(r'^https?://', base_url):
                            if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
                                mpd_base_url += '/'
                            base_url = mpd_base_url + base_url
                        representation_id = representation_attrib.get('id')
                        lang = representation_attrib.get('lang')
                        url_el = representation.find(_add_ns('BaseURL'))
                        filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                        bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                        f = {
                            'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                            'url': base_url,
                            'manifest_url': mpd_url,
                            'ext': mimetype2ext(mime_type),
                            'width': int_or_none(representation_attrib.get('width')),
                            'height': int_or_none(representation_attrib.get('height')),
                            'tbr': float_or_none(bandwidth, 1000),
                            'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                            'fps': int_or_none(representation_attrib.get('frameRate')),
                            'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                            'format_note': 'DASH %s' % content_type,
                            'filesize': filesize,
                        }
                        f.update(parse_codecs(representation_attrib.get('codecs')))

                        representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                        def prepare_template(template_name, identifiers):
                            t = representation_ms_info[template_name]
                            t = t.replace('$RepresentationID$', representation_id)
                            t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                            t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                            # str.replace() does not mutate in place, so the
                            # result must be assigned back for the '$$' escape
                            # to take effect
                            t = t.replace('$$', '$')
                            return t

                        # @initialization is a regular template like @media one
                        # so it should be handled just the same way (see
                        # https://github.com/rg3/youtube-dl/issues/11605)
                        if 'initialization' in representation_ms_info:
                            initialization_template = prepare_template(
                                'initialization',
                                # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                                # $Time$ shall not be included for @initialization thus
                                # only $Bandwidth$ remains
                                ('Bandwidth', ))
                            representation_ms_info['initialization_url'] = initialization_template % {
                                'Bandwidth': bandwidth,
                            }

                        def location_key(location):
                            return 'url' if re.match(r'^https?://', location) else 'path'

                        if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                            media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                            media_location_key = location_key(media_template)

                            # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                            # can't be used at the same time
                            if '%(Number' in media_template and 's' not in representation_ms_info:
                                segment_duration = None
                                if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                    segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                    representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                                representation_ms_info['fragments'] = [{
                                    media_location_key: media_template % {
                                        'Number': segment_number,
                                        'Bandwidth': bandwidth,
                                    },
                                    'duration': segment_duration,
                                } for segment_number in range(
                                    representation_ms_info['start_number'],
                                    representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                            else:
                                # $Number*$ or $Time$ in media template with S list available
                                # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                                # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                                representation_ms_info['fragments'] = []
                                segment_time = 0
                                segment_d = None
                                segment_number = representation_ms_info['start_number']

                                def add_segment_url():
                                    segment_url = media_template % {
                                        'Time': segment_time,
'Bandwidth': bandwidth, 'Number': segment_number, } representation_ms_info['fragments'].append({ media_location_key: segment_url, 'duration': float_or_none(segment_d, representation_ms_info['timescale']), }) for num, s in enumerate(representation_ms_info['s']): segment_time = s.get('t') or segment_time segment_d = s['d'] add_segment_url() segment_number += 1 for r in range(s.get('r', 0)): segment_time += segment_d add_segment_url() segment_number += 1 segment_time += segment_d elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info: # No media template # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI # or any YouTube dashsegments video fragments = [] segment_index = 0 timescale = representation_ms_info['timescale'] for s in representation_ms_info['s']: duration = float_or_none(s['d'], timescale) for r in range(s.get('r', 0) + 1): segment_uri = representation_ms_info['segment_urls'][segment_index] fragments.append({ location_key(segment_uri): segment_uri, 'duration': duration, }) segment_index += 1 representation_ms_info['fragments'] = fragments elif 'segment_urls' in representation_ms_info: # Segment URLs with no SegmentTimeline # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091 # https://github.com/rg3/youtube-dl/pull/14844 fragments = [] segment_duration = float_or_none( representation_ms_info['segment_duration'], representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None for segment_url in representation_ms_info['segment_urls']: fragment = { location_key(segment_url): segment_url, } if segment_duration: fragment['duration'] = segment_duration fragments.append(fragment) representation_ms_info['fragments'] = fragments # NB: MPD manifest may contain direct URLs to unfragmented media. # No fragments key is present in this case. if 'fragments' in representation_ms_info: f.update({ 'fragment_base_url': base_url, 'fragments': [], 'protocol': 'http_dash_segments', }) if 'initialization_url' in representation_ms_info: initialization_url = representation_ms_info['initialization_url'] if not f.get('url'): f['url'] = initialization_url f['fragments'].append({location_key(initialization_url): initialization_url}) f['fragments'].extend(representation_ms_info['fragments']) try: existing_format = next( fo for fo in formats if fo['format_id'] == representation_id) except StopIteration: full_info = formats_dict.get(representation_id, {}).copy() full_info.update(f) formats.append(full_info) else: existing_format.update(f) else: self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type) return formats def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True): res = self._download_webpage_handle( ism_url, video_id, note=note or 'Downloading ISM manifest', errnote=errnote or 'Failed to download ISM manifest', fatal=fatal) if res is False: return [] ism, urlh = res return self._parse_ism_formats( compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id) def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None): """ Parse formats from ISM manifest. References: 1. 
[MS-SSTR]: Smooth Streaming Protocol, https://msdn.microsoft.com/en-us/library/ff469518.aspx """ if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None: return [] duration = int(ism_doc.attrib['Duration']) timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000 formats = [] for stream in ism_doc.findall('StreamIndex'): stream_type = stream.get('Type') if stream_type not in ('video', 'audio'): continue url_pattern = stream.attrib['Url'] stream_timescale = int_or_none(stream.get('TimeScale')) or timescale stream_name = stream.get('Name') for track in stream.findall('QualityLevel'): fourcc = track.get('FourCC') # TODO: add support for WVC1 and WMAP if fourcc not in ('H264', 'AVC1', 'AACL'): self.report_warning('%s is not a supported codec' % fourcc) continue tbr = int(track.attrib['Bitrate']) // 1000 # [1] does not mention Width and Height attributes. However, # they're often present while MaxWidth and MaxHeight are # missing, so should be used as fallbacks width = int_or_none(track.get('MaxWidth') or track.get('Width')) height = int_or_none(track.get('MaxHeight') or track.get('Height')) sampling_rate = int_or_none(track.get('SamplingRate')) track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern) track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern) fragments = [] fragment_ctx = { 'time': 0, } stream_fragments = stream.findall('c') for stream_fragment_index, stream_fragment in enumerate(stream_fragments): fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time'] fragment_repeat = int_or_none(stream_fragment.get('r')) or 1 fragment_ctx['duration'] = int_or_none(stream_fragment.get('d')) if not fragment_ctx['duration']: try: next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t']) except IndexError: next_fragment_time = duration fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat for _ in range(fragment_repeat): fragments.append({ 'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern), 'duration': fragment_ctx['duration'] / stream_timescale, }) fragment_ctx['time'] += fragment_ctx['duration'] format_id = [] if ism_id: format_id.append(ism_id) if stream_name: format_id.append(stream_name) format_id.append(compat_str(tbr)) formats.append({ 'format_id': '-'.join(format_id), 'url': ism_url, 'manifest_url': ism_url, 'ext': 'ismv' if stream_type == 'video' else 'isma', 'width': width, 'height': height, 'tbr': tbr, 'asr': sampling_rate, 'vcodec': 'none' if stream_type == 'audio' else fourcc, 'acodec': 'none' if stream_type == 'video' else fourcc, 'protocol': 'ism', 'fragments': fragments, '_download_params': { 'duration': duration, 'timescale': stream_timescale, 'width': width or 0, 'height': height or 0, 'fourcc': fourcc, 'codec_private_data': track.get('CodecPrivateData'), 'sampling_rate': sampling_rate, 'channels': int_or_none(track.get('Channels', 2)), 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)), 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)), }, }) return formats def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None): def absolute_url(video_url): return compat_urlparse.urljoin(base_url, video_url) def parse_content_type(content_type): if not content_type: return {} ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type) if ctr: mimetype, codecs 
= ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}

        def _media_formats(src, cur_media_type, type_info={}):
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                }]
            return is_plain_url, formats

        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        media_tags = [(media_tag, media_type, '')
                      for media_tag, media_type
                      in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/rg3/youtube-dl/issues/11979, example URL:
            # http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
        for media_tag, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = media_attributes.get('src')
            if src:
                _, formats = _media_formats(src, media_type)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = media_attributes.get('poster')
            if media_content:
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    source_attributes = extract_attributes(source_tag)
                    src = source_attributes.get('src')
                    if not src:
                        continue
                    f = parse_content_type(source_attributes.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # res attribute is not standard but seen several times
                        # in the wild
                        f.update({
                            'height': int_or_none(source_attributes.get('res')),
                            'format_id': source_attributes.get('label'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = track_attributes.get('src')
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries

    def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
        formats = []
        hdcore_sign = 'hdcore=3.7.0'
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
        hds_host = hosts.get('hds')
        if hds_host:
            f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
        if 'hdcore=' not in f4m_url:
            f4m_url += ('&' if '?'
in f4m_url else '?') + hdcore_sign f4m_formats = self._extract_f4m_formats( f4m_url, video_id, f4m_id='hds', fatal=False) for entry in f4m_formats: entry.update({'extra_param_to_segment_url': hdcore_sign}) formats.extend(f4m_formats) m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8') hls_host = hosts.get('hls') if hls_host: m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url) formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) return formats def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]): query = compat_urlparse.urlparse(url).query url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url) url_base = self._search_regex( r'(?:(?:https?|rtmp|rtsp):)?(//[^?]+)', url, 'format url') http_base_url = '%s:%s' % ('http', url_base) formats = [] def manifest_url(manifest): m_url = '%s/%s' % (http_base_url, manifest) if query: m_url += '?%s' % query return m_url if 'm3u8' not in skip_protocols: formats.extend(self._extract_m3u8_formats( manifest_url('playlist.m3u8'), video_id, 'mp4', m3u8_entry_protocol, m3u8_id='hls', fatal=False)) if 'f4m' not in skip_protocols: formats.extend(self._extract_f4m_formats( manifest_url('manifest.f4m'), video_id, f4m_id='hds', fatal=False)) if 'dash' not in skip_protocols: formats.extend(self._extract_mpd_formats( manifest_url('manifest.mpd'), video_id, mpd_id='dash', fatal=False)) if re.search(r'(?:/smil:|\.smil)', url_base): if 'smil' not in skip_protocols: rtmp_formats = self._extract_smil_formats( manifest_url('jwplayer.smil'), video_id, fatal=False) for rtmp_format in rtmp_formats: rtsp_format = rtmp_format.copy() rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path']) del rtsp_format['play_path'] del rtsp_format['ext'] rtsp_format.update({ 'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'), 'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'), 'protocol': 'rtsp', }) formats.extend([rtmp_format, rtsp_format]) else: for protocol in ('rtmp', 'rtsp'): if protocol not in skip_protocols: formats.append({ 'url': '%s:%s' % (protocol, url_base), 'format_id': protocol, 'protocol': protocol, }) return formats def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json): mobj = re.search( r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)', webpage) if mobj: try: jwplayer_data = self._parse_json(mobj.group('options'), video_id=video_id, transform_source=transform_source) except ExtractorError: pass else: if isinstance(jwplayer_data, dict): return jwplayer_data def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs): jwplayer_data = self._find_jwplayer_data( webpage, video_id, transform_source=js_to_json) return self._parse_jwplayer_data( jwplayer_data, video_id, *args, **kwargs) def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True, m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): # JWPlayer backward compatibility: flattened playlists # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96 if 'playlist' not in jwplayer_data: jwplayer_data = {'playlist': [jwplayer_data]} entries = [] # JWPlayer backward compatibility: single playlist item # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10 if not isinstance(jwplayer_data['playlist'], list): 
jwplayer_data['playlist'] = [jwplayer_data['playlist']] for video_data in jwplayer_data['playlist']: # JWPlayer backward compatibility: flattened sources # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35 if 'sources' not in video_data: video_data['sources'] = [video_data] this_video_id = video_id or video_data['mediaid'] formats = self._parse_jwplayer_formats( video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id, mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url) subtitles = {} tracks = video_data.get('tracks') if tracks and isinstance(tracks, list): for track in tracks: if not isinstance(track, dict): continue if track.get('kind') != 'captions': continue track_url = urljoin(base_url, track.get('file')) if not track_url: continue subtitles.setdefault(track.get('label') or 'en', []).append({ 'url': self._proto_relative_url(track_url) }) entry = { 'id': this_video_id, 'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')), 'description': video_data.get('description'), 'thumbnail': self._proto_relative_url(video_data.get('image')), 'timestamp': int_or_none(video_data.get('pubdate')), 'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')), 'subtitles': subtitles, } # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32 if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']): entry.update({ '_type': 'url_transparent', 'url': formats[0]['url'], }) else: self._sort_formats(formats) entry['formats'] = formats entries.append(entry) if len(entries) == 1: return entries[0] else: return self.playlist_result(entries) def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None, m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): urls = [] formats = [] for source in jwplayer_sources_data: if not isinstance(source, dict): continue source_url = self._proto_relative_url(source.get('file')) if not source_url: continue if base_url: source_url = compat_urlparse.urljoin(base_url, source_url) if source_url in urls: continue urls.append(source_url) source_type = source.get('type') or '' ext = mimetype2ext(source_type) or determine_ext(source_url) if source_type == 'hls' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( source_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=m3u8_id, fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( source_url, video_id, mpd_id=mpd_id, fatal=False)) elif ext == 'smil': formats.extend(self._extract_smil_formats( source_url, video_id, fatal=False)) # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67 elif source_type.startswith('audio') or ext in ( 'oga', 'aac', 'mp3', 'mpeg', 'vorbis'): formats.append({ 'url': source_url, 'vcodec': 'none', 'ext': ext, }) else: height = int_or_none(source.get('height')) if height is None: # Often no height is provided but there is a label in # format like "1080p", "720p SD", or 1080. 
height = int_or_none(self._search_regex( r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''), 'height', default=None)) a_format = { 'url': source_url, 'width': int_or_none(source.get('width')), 'height': height, 'tbr': int_or_none(source.get('bitrate')), 'ext': ext, } if source_url.startswith('rtmp'): a_format['ext'] = 'flv' # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as # of jwplayer.flash.swf rtmp_url_parts = re.split( r'((?:mp4|mp3|flv):)', source_url, 1) if len(rtmp_url_parts) == 3: rtmp_url, prefix, play_path = rtmp_url_parts a_format.update({ 'url': rtmp_url, 'play_path': prefix + play_path, }) if rtmp_params: a_format.update(rtmp_params) formats.append(a_format) return formats def _live_title(self, name): """ Generate the title for a live video """ now = datetime.datetime.now() now_str = now.strftime('%Y-%m-%d %H:%M') return name + ' ' + now_str def _int(self, v, name, fatal=False, **kwargs): res = int_or_none(v, **kwargs) if res is None: msg = 'Failed to extract %s: Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res def _float(self, v, name, fatal=False, **kwargs): res = float_or_none(v, **kwargs) if res is None: msg = 'Failed to extract %s: Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res def _set_cookie(self, domain, name, value, expire_time=None, port=None, path='/', secure=False, discard=False, rest={}, **kwargs): cookie = compat_cookiejar.Cookie( 0, name, value, port, port is not None, domain, True, domain.startswith('.'), path, True, secure, expire_time, discard, None, None, rest) self._downloader.cookiejar.set_cookie(cookie) def _get_cookies(self, url): """ Return a compat_cookies.SimpleCookie with the cookies for the url """ req = sanitized_Request(url) self._downloader.cookiejar.add_cookie_header(req) return compat_cookies.SimpleCookie(req.get_header('Cookie')) def get_testcases(self, include_onlymatching=False): t = getattr(self, '_TEST', None) if t: assert not hasattr(self, '_TESTS'), \ '%s has _TEST and _TESTS' % type(self).__name__ tests = [t] else: tests = getattr(self, '_TESTS', []) for t in tests: if not include_onlymatching and t.get('only_matching', False): continue t['name'] = type(self).__name__[:-len('IE')] yield t def is_suitable(self, age_limit): """ Test whether the extractor is generally suitable for the given age limit (i.e. pornographic sites are not, all others usually are) """ any_restricted = False for tc in self.get_testcases(include_onlymatching=False): if tc.get('playlist', []): tc = tc['playlist'][0] is_restricted = age_restricted( tc.get('info_dict', {}).get('age_limit'), age_limit) if not is_restricted: return True any_restricted = any_restricted or is_restricted return not any_restricted def extract_subtitles(self, *args, **kwargs): if (self._downloader.params.get('writesubtitles', False) or self._downloader.params.get('listsubtitles')): return self._get_subtitles(*args, **kwargs) return {} def _get_subtitles(self, *args, **kwargs): raise NotImplementedError('This method must be implemented by subclasses') @staticmethod def _merge_subtitle_items(subtitle_list1, subtitle_list2): """ Merge subtitle items for one language. Items with duplicated URLs will be dropped.
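For example (hypothetical values), merging [{'url': 'http://a/en.vtt'}] with [{'url': 'http://a/en.vtt'}, {'url': 'http://a/en.srt'}] returns [{'url': 'http://a/en.vtt'}, {'url': 'http://a/en.srt'}]: the duplicate .vtt item from the second list is dropped and its new .srt item is kept.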
""" list1_urls = set([item['url'] for item in subtitle_list1]) ret = list(subtitle_list1) ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls]) return ret @classmethod def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2): """ Merge two subtitle dictionaries, language by language. """ ret = dict(subtitle_dict1) for lang in subtitle_dict2: ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang]) return ret def extract_automatic_captions(self, *args, **kwargs): if (self._downloader.params.get('writeautomaticsub', False) or self._downloader.params.get('listsubtitles')): return self._get_automatic_captions(*args, **kwargs) return {} def _get_automatic_captions(self, *args, **kwargs): raise NotImplementedError('This method must be implemented by subclasses') def mark_watched(self, *args, **kwargs): if (self._downloader.params.get('mark_watched', False) and (self._get_login_info()[0] is not None or self._downloader.params.get('cookiefile') is not None)): self._mark_watched(*args, **kwargs) def _mark_watched(self, *args, **kwargs): raise NotImplementedError('This method must be implemented by subclasses') def geo_verification_headers(self): headers = {} geo_verification_proxy = self._downloader.params.get('geo_verification_proxy') if geo_verification_proxy: headers['Ytdl-request-proxy'] = geo_verification_proxy return headers def _generic_id(self, url): return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0]) def _generic_title(self, url): return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]) class SearchInfoExtractor(InfoExtractor): """ Base class for paged search queries extractors. They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query} Instances should define _SEARCH_KEY and _MAX_RESULTS. 
""" @classmethod def _make_valid_url(cls): return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY @classmethod def suitable(cls, url): return re.match(cls._make_valid_url(), url) is not None def _real_extract(self, query): mobj = re.match(self._make_valid_url(), query) if mobj is None: raise ExtractorError('Invalid search query "%s"' % query) prefix = mobj.group('prefix') query = mobj.group('query') if prefix == '': return self._get_n_results(query, 1) elif prefix == 'all': return self._get_n_results(query, self._MAX_RESULTS) else: n = int(prefix) if n <= 0: raise ExtractorError('invalid download number %s for query "%s"' % (n, query)) elif n > self._MAX_RESULTS: self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n)) n = self._MAX_RESULTS return self._get_n_results(query, n) def _get_n_results(self, query, n): """Get a specified number of results for a query""" raise NotImplementedError('This method must be implemented by subclasses') @property def SEARCH_KEY(self): return self._SEARCH_KEY # coding: utf-8 from __future__ import unicode_literals import base64 import datetime import hashlib import json import netrc import os import random import re import socket import sys import time import math from ..compat import ( compat_cookiejar, compat_cookies, compat_etree_fromstring, compat_getpass, compat_http_client, compat_os_name, compat_str, compat_urllib_error, compat_urllib_parse_unquote, compat_urllib_parse_urlencode, compat_urllib_request, compat_urlparse, compat_xml_parse_error, ) from ..downloader.f4m import ( get_base_url, remove_encrypted_media, ) from ..utils import ( NO_DEFAULT, age_restricted, base_url, bug_reports_message, clean_html, compiled_regex_type, determine_ext, determine_protocol, error_to_compat_str, ExtractorError, extract_attributes, fix_xml_ampersands, float_or_none, GeoRestrictedError, GeoUtils, int_or_none, js_to_json, mimetype2ext, orderedSet, parse_codecs, parse_duration, parse_iso8601, parse_m3u8_attributes, RegexNotFoundError, sanitized_Request, sanitize_filename, unescapeHTML, unified_strdate, unified_timestamp, update_Request, update_url_query, urljoin, url_basename, xpath_element, xpath_text, xpath_with_ns, ) class InfoExtractor(object): """Information Extractor class. Information extractors are the classes that, given a URL, extract information about the video (or videos) the URL refers to. This information includes the real video URL, the video title, author and others. The information is stored in a dictionary which is then passed to the YoutubeDL. The YoutubeDL processes this information possibly downloading the video to the file system, among other possible outcomes. The type field determines the type of the result. By far the most common value (and the default if _type is missing) is "video", which indicates a single video. For a video, the dictionaries must include the following fields: id: Video identifier. title: Video title, unescaped. Additionally, it must contain either a formats entry or a url one: formats: A list of dictionaries for each format available, ordered from worst to best quality. Potential fields: * url Mandatory. The URL of the video file * manifest_url The URL of the manifest file in case of fragmented media (DASH, hls, hds) * ext Will be calculated from URL if missing * format A human-readable description of the format ("mp4 container with h264/opus"). Calculated from the format_id, width, height. and format_note fields if missing. 
* format_id A short description of the format ("mp4_h264_opus" or "19"). Technically optional, but strongly recommended. * format_note Additional info about the format ("3D" or "DASH video") * width Width of the video, if known * height Height of the video, if known * resolution Textual description of width and height * tbr Average bitrate of audio and video in KBit/s * abr Average audio bitrate in KBit/s * acodec Name of the audio codec in use * asr Audio sampling rate in Hertz * vbr Average video bitrate in KBit/s * fps Frame rate * vcodec Name of the video codec in use * container Name of the container format * filesize The number of bytes, if known in advance * filesize_approx An estimate for the number of bytes * player_url SWF Player URL (used for rtmpdump). * protocol The protocol that will be used for the actual download, lower-case. "http", "https", "rtsp", "rtmp", "rtmpe", "m3u8", "m3u8_native" or "http_dash_segments". * fragment_base_url Base URL for fragments. Each fragment's path value (if present) will be relative to this URL. * fragments A list of fragments of a fragmented media. Each fragment entry must contain either a URL or a path. If a URL is present it should be considered by a client. Otherwise both path and fragment_base_url must be present. Here is the list of all potential fields: * "url" - fragment's URL * "path" - fragment's path relative to fragment_base_url * "duration" (optional, int or float) * "filesize" (optional, int) * preference Order number of this format. If this field is present and not None, the formats get sorted by this field, regardless of all other values. -1 for default (order by other properties), -2 or smaller for less than default. < -1000 to hide the format (if there is another one which is strictly better) * language Language code, e.g. "de" or "en-US". * language_preference Is this in the language mentioned in the URL? 10 if it's what the URL is about, -1 for default (don't know), -10 otherwise, other values reserved for now. * quality Order number of the video quality of this format, irrespective of the file format. -1 for default (order by other properties), -2 or smaller for less than default. * source_preference Order number for this video source (quality takes higher priority) -1 for default (order by other properties), -2 or smaller for less than default. * http_headers A dictionary of additional HTTP headers to add to the request. * stretched_ratio If given and not 1, indicates that the video's pixels are not square. width : height ratio as float. * no_resume The server does not support resuming the (HTTP or RTMP) download. Boolean. url: Final video URL. ext: Video filename extension. format: The video format, defaults to ext (used for --get-format) player_url: SWF Player URL (used for rtmpdump). The following fields are optional: alt_title: A secondary title of the video. display_id An alternative identifier for the video, not necessarily unique, but available before title. Typically, id is something like "4234987", title "Dancing naked mole rats", and display_id "dancing-naked-mole-rats" thumbnails: A list of dictionaries, with the following entries: * "id" (optional, string) - Thumbnail format ID * "url" * "preference" (optional, int) - quality of the image * "width" (optional, int) * "height" (optional, int) * "resolution" (optional, string "{width}x{height}", deprecated) * "filesize" (optional, int) thumbnail: Full URL to a video thumbnail image. description: Full video description. uploader: Full name of the video uploader.
license: License name the video is licensed under. creator: The creator of the video. release_date: The date (YYYYMMDD) when the video was released. timestamp: UNIX timestamp of the moment the video became available. upload_date: Video upload date (YYYYMMDD). If not explicitly set, calculated from timestamp. uploader_id: Nickname or id of the video uploader. uploader_url: Full URL to a personal webpage of the video uploader. location: Physical location where the video was filmed. subtitles: The available subtitles as a dictionary in the format {tag: subformats}. "tag" is usually a language code, and "subformats" is a list sorted from lower to higher preference, each element is a dictionary with the "ext" entry and one of: * "data": The subtitles file contents * "url": A URL pointing to the subtitles file "ext" will be calculated from URL if missing automatic_captions: Like 'subtitles', used by the YoutubeIE for automatically generated captions duration: Length of the video in seconds, as an integer or float. view_count: How many users have watched the video on the platform. like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video repost_count: Number of reposts of the video average_rating: Average rating given by users, the scale used depends on the webpage comment_count: Number of comments on the video comments: A list of comments, each with one or more of the following properties (all but one of text or html optional): * "author" - human-readable name of the comment author * "author_id" - user ID of the comment author * "id" - Comment ID * "html" - Comment as HTML * "text" - Plain text of the comment * "timestamp" - UNIX timestamp of comment * "parent" - ID of the comment this one is replying to. Set to "root" to indicate that this is a comment to the original video. age_limit: Age restriction for the video, as an integer (years) webpage_url: The URL to the video webpage, if given to youtube-dl it should allow getting the same result again. (It will be set by YoutubeDL if it's missing) categories: A list of categories that the video falls in, for example ["Sports", "Berlin"] tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"] is_live: True, False, or None (=unknown). Whether this video is a live stream that goes on instead of a fixed-length video. start_time: Time in seconds where the reproduction should start, as specified in the URL. end_time: Time in seconds where the reproduction should end, as specified in the URL. chapters: A list of dictionaries, with the following entries: * "start_time" - The start time of the chapter in seconds * "end_time" - The end time of the chapter in seconds * "title" (optional, string) The following fields should only be used when the video belongs to some logical chapter or section: chapter: Name or title of the chapter the video belongs to. chapter_number: Number of the chapter the video belongs to, as an integer. chapter_id: Id of the chapter the video belongs to, as a unicode string. The following fields should only be used when the video is an episode of some series, programme or podcast: series: Title of the series or programme the video episode belongs to. season: Title of the season the video episode belongs to. season_number: Number of the season the video episode belongs to, as an integer. season_id: Id of the season the video episode belongs to, as a unicode string. episode: Title of the video episode.
Unlike the mandatory video title field, this field should denote the exact title of the video episode without any kind of decoration. episode_number: Number of the video episode within a season, as an integer. episode_id: Id of the video episode, as a unicode string. The following fields should only be used when the media is a track or a part of a music album: track: Title of the track. track_number: Number of the track within an album or a disc, as an integer. track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii), as a unicode string. artist: Artist(s) of the track. genre: Genre(s) of the track. album: Title of the album the track belongs to. album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc). album_artist: List of all artists who appeared on the album (e.g. "Ash Borer / Fell Voices" or "Various Artists", useful for splits and compilations). disc_number: Number of the disc or other physical medium the track belongs to, as an integer. release_year: Year (YYYY) when the album was released. Unless mentioned otherwise, the fields should be Unicode strings. Unless mentioned otherwise, None is equivalent to absence of information. _type "playlist" indicates multiple videos. There must be a key "entries", which is a list, an iterable, or a PagedList object, each element of which is a valid dictionary by this specification. Additionally, playlists can have "id", "title", "description", "uploader", "uploader_id", "uploader_url" attributes with the same semantics as videos (see above). _type "multi_video" indicates that there are multiple videos that form a single show, for example, multiple acts of an opera or TV episode. It must have an entries key like a playlist and contain all the keys required for a video at the same time. _type "url" indicates that the video must be extracted from another location, possibly by a different extractor. Its only required key is: "url" - the next URL to extract. The key "ie_key" can be set to the class name (minus the trailing "IE", e.g. "Youtube") if the extractor class is known in advance. Additionally, the dictionary may have any properties of the resolved entity known in advance, for example "title" if the title of the referred video is known ahead of time. _type "url_transparent" entities have the same specification as "url", but indicate that the given additional information is more precise than the one associated with the resolved URL. This is useful when a site employs a video service that hosts the video and its technical metadata, but that video service does not embed a useful title, description etc. Subclasses of this one should re-define the _real_initialize() and _real_extract() methods and define a _VALID_URL regexp. Probably, they should also be added to the list of extractors. _GEO_BYPASS attribute may be set to False in order to disable geo restriction bypass mechanisms for a particular extractor. Though it won't disable explicit geo restriction bypass based on the country code provided with geo_bypass_country. (experimental) _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted countries for this extractor. One of these countries will be used by the geo restriction bypass mechanism right away in order to bypass geo restriction, of course, if the mechanism is not disabled. (experimental) NB: both these geo attributes are experimental and may change in the future or be completely removed.
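As an illustration only (ExampleIE, its URL pattern and the assumed page markup are made up, not a real extractor), a minimal subclass may look like this:

    class ExampleIE(InfoExtractor):
        _VALID_URL = r'https?://example\.com/watch/(?P<id>[0-9]+)'

        def _real_extract(self, url):
            video_id = self._match_id(url)
            webpage = self._download_webpage(url, video_id)
            # A plain "video" result: id and title are mandatory, plus
            # either a url or a formats entry (see above)
            return {
                'id': video_id,
                'title': self._og_search_title(webpage),
                'url': self._og_search_video_url(webpage),
            }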
Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. """ _ready = False _downloader = None _x_forwarded_for_ip = None _GEO_BYPASS = True _GEO_COUNTRIES = None _WORKING = True def __init__(self, downloader=None): """Constructor. Receives an optional downloader.""" self._ready = False self._x_forwarded_for_ip = None self.set_downloader(downloader) @classmethod def suitable(cls, url): """Receives a URL and returns True if suitable for this IE.""" # This does not use has/getattr intentionally - we want to know whether # we have cached the regexp for *this* class, whereas getattr would also # match the superclass if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) return cls._VALID_URL_RE.match(url) is not None @classmethod def _match_id(cls, url): if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) m = cls._VALID_URL_RE.match(url) assert m return compat_str(m.group('id')) @classmethod def working(cls): """Getter method for _WORKING.""" return cls._WORKING def initialize(self): """Initializes an instance (authentication, etc).""" self._initialize_geo_bypass(self._GEO_COUNTRIES) if not self._ready: self._real_initialize() self._ready = True def _initialize_geo_bypass(self, countries): """ Initialize the geo restriction bypass mechanism. This method is used to initialize the geo bypass mechanism based on faking the X-Forwarded-For HTTP header. A random country from the provided country list is selected and a random IP belonging to this country is generated. This IP will be passed as the X-Forwarded-For HTTP header in all subsequent HTTP requests. This method is used for the initial geo bypass mechanism initialization during instance initialization with _GEO_COUNTRIES. You may also manually call it from the extractor's code if geo countries information is not available beforehand (e.g. obtained during extraction) or for some other reason. """ if not self._x_forwarded_for_ip: country_code = self._downloader.params.get('geo_bypass_country', None) # If there is no explicit country for geo bypass specified and # the extractor is known to be geo restricted let's fake IP # as X-Forwarded-For right away. if (not country_code and self._GEO_BYPASS and self._downloader.params.get('geo_bypass', True) and countries): country_code = random.choice(countries) if country_code: self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if self._downloader.params.get('verbose', False): self._downloader.to_screen( '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper())) def extract(self, url): """Extracts URL information and returns it in a list of dicts.""" try: for _ in range(2): try: self.initialize() ie_result = self._real_extract(url) if self._x_forwarded_for_ip: ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip return ie_result except GeoRestrictedError as e: if self.__maybe_fake_ip_and_retry(e.countries): continue raise except ExtractorError: raise except compat_http_client.IncompleteRead as e: raise ExtractorError('A network error has occurred.', cause=e, expected=True) except (KeyError, StopIteration) as e: raise ExtractorError('An extractor error has occurred.', cause=e) def __maybe_fake_ip_and_retry(self, countries): if (not self._downloader.params.get('geo_bypass_country', None) and self._GEO_BYPASS and self._downloader.params.get('geo_bypass', True) and not self._x_forwarded_for_ip and countries): country_code = random.choice(countries) self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if self._x_forwarded_for_ip: self.report_warning( 'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.' % (self._x_forwarded_for_ip, country_code.upper())) return True return False def set_downloader(self, downloader): """Sets the downloader for this IE.""" self._downloader = downloader def _real_initialize(self): """Real initialization process. Redefine in subclasses.""" pass def _real_extract(self, url): """Real extraction process. Redefine in subclasses.""" pass @classmethod def ie_key(cls): """A string for getting the InfoExtractor with get_info_extractor""" return compat_str(cls.__name__[:-2]) @property def IE_NAME(self): return compat_str(type(self).__name__[:-2]) def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}): """ Returns the response handle """ if note is None: self.report_download_webpage(video_id) elif note is not False: if video_id is None: self.to_screen('%s' % (note,)) else: self.to_screen('%s: %s' % (video_id, note)) # Some sites check the X-Forwarded-For HTTP header in order to figure out # the origin of the client behind a proxy. This allows bypassing geo # restriction by faking this header's value to an IP that belongs to a # geo-unrestricted country. We will do so once we encounter any # geo restriction error.
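# For example (hypothetical values), once a bypass IP has been picked,
# the request made below will carry a header like:
#     X-Forwarded-For: 203.0.113.42
# where the address was generated by GeoUtils.random_ipv4() for the
# chosen country; an X-Forwarded-For header explicitly passed by the
# caller is never overwritten.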
if self._x_forwarded_for_ip: if 'X-Forwarded-For' not in headers: headers['X-Forwarded-For'] = self._x_forwarded_for_ip if isinstance(url_or_request, compat_urllib_request.Request): url_or_request = update_Request( url_or_request, data=data, headers=headers, query=query) else: if query: url_or_request = update_url_query(url_or_request, query) if data is not None or headers: url_or_request = sanitized_Request(url_or_request, data, headers) try: return self._downloader.urlopen(url_or_request) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: if errnote is False: return False if errnote is None: errnote = 'Unable to download webpage' errmsg = '%s: %s' % (errnote, error_to_compat_str(err)) if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: self._downloader.report_warning(errmsg) return False def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}): """ Returns a tuple (page content as string, URL handle) """ # Strip hashes from the URL (#1038) if isinstance(url_or_request, (compat_str, str)): url_or_request = url_or_request.partition('#')[0] urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query) if urlh is False: assert not fatal return False content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding) return (content, urlh) @staticmethod def _guess_encoding_from_content(content_type, webpage_bytes): m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type) if m: encoding = m.group(1) else: m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]', webpage_bytes[:1024]) if m: encoding = m.group(1).decode('ascii') elif webpage_bytes.startswith(b'\xff\xfe'): encoding = 'utf-16' else: encoding = 'utf-8' return encoding def __check_blocked(self, content): first_block = content[:512] if ('<title>Access to this site is blocked</title>' in content and 'Websense' in first_block): msg = 'Access to this webpage has been blocked by Websense filtering software in your network.' blocked_iframe = self._html_search_regex( r'<iframe src="([^"]+)"', content, 'Websense information URL', default=None) if blocked_iframe: msg += ' Visit %s for more details' % blocked_iframe raise ExtractorError(msg, expected=True) if '<title>The URL you requested has been blocked</title>' in first_block: msg = ( 'Access to this webpage has been blocked by Indian censorship. ' 'Use a VPN or proxy server (with --proxy) to route around it.') block_msg = self._html_search_regex( r'</h1><p>(.*?)</p>', content, 'block message', default=None) if block_msg: msg += ' (Message: "%s")' % block_msg.replace('\n', ' ') raise ExtractorError(msg, expected=True) if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and 'blocklist.rkn.gov.ru' in content): raise ExtractorError( 'Access to this webpage has been blocked by decision of the Russian government. 
' 'Visit http://blocklist.rkn.gov.ru/ for a block reason.', expected=True) def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None): content_type = urlh.headers.get('Content-Type', '') webpage_bytes = urlh.read() if prefix is not None: webpage_bytes = prefix + webpage_bytes if not encoding: encoding = self._guess_encoding_from_content(content_type, webpage_bytes) if self._downloader.params.get('dump_intermediate_pages', False): self.to_screen('Dumping request to ' + urlh.geturl()) dump = base64.b64encode(webpage_bytes).decode('ascii') self._downloader.to_screen(dump) if self._downloader.params.get('write_pages', False): basen = '%s_%s' % (video_id, urlh.geturl()) if len(basen) > 240: h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest() basen = basen[:240 - len(h)] + h raw_filename = basen + '.dump' filename = sanitize_filename(raw_filename, restricted=True) self.to_screen('Saving request to ' + filename) # Working around MAX_PATH limitation on Windows (see # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) if compat_os_name == 'nt': absfilepath = os.path.abspath(filename) if len(absfilepath) > 259: filename = '\\\\?\\' + absfilepath with open(filename, 'wb') as outf: outf.write(webpage_bytes) try: content = webpage_bytes.decode(encoding, 'replace') except LookupError: content = webpage_bytes.decode('utf-8', 'replace') self.__check_blocked(content) return content def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}): """ Returns the data of the page as a string """ success = False try_count = 0 while success is False: try: res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query) success = True except compat_http_client.IncompleteRead as e: try_count += 1 if try_count >= tries: raise e self._sleep(timeout, video_id) if res is False: return res else: content, _ = res return content def _download_xml(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}): """Return the xml as an xml.etree.ElementTree.Element""" xml_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query) if xml_string is False: return xml_string return self._parse_xml( xml_string, video_id, transform_source=transform_source, fatal=fatal) def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True): if transform_source: xml_string = transform_source(xml_string) try: return compat_etree_fromstring(xml_string.encode('utf-8')) except compat_xml_parse_error as ve: errmsg = '%s: Failed to parse XML ' % video_id if fatal: raise ExtractorError(errmsg, cause=ve) else: self.report_warning(errmsg + str(ve)) def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}): json_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query) if (not fatal) and json_string is False: return None return self._parse_json( json_string, video_id, transform_source=transform_source, fatal=fatal) def 
_parse_json(self, json_string, video_id, transform_source=None, fatal=True): if transform_source: json_string = transform_source(json_string) try: return json.loads(json_string) except ValueError as ve: errmsg = '%s: Failed to parse JSON ' % video_id if fatal: raise ExtractorError(errmsg, cause=ve) else: self.report_warning(errmsg + str(ve)) def report_warning(self, msg, video_id=None): idstr = '' if video_id is None else '%s: ' % video_id self._downloader.report_warning( '[%s] %s%s' % (self.IE_NAME, idstr, msg)) def to_screen(self, msg): """Print msg to screen, prefixing it with '[ie_name]'""" self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg)) def report_extraction(self, id_or_name): """Report information extraction.""" self.to_screen('%s: Extracting information' % id_or_name) def report_download_webpage(self, video_id): """Report webpage download.""" self.to_screen('%s: Downloading webpage' % video_id) def report_age_confirmation(self): """Report attempt to confirm age.""" self.to_screen('Confirming age') def report_login(self): """Report attempt to log in.""" self.to_screen('Logging in') @staticmethod def raise_login_required(msg='This video is only available for registered users'): raise ExtractorError( '%s. Use --username and --password or --netrc to provide account credentials.' % msg, expected=True) @staticmethod def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None): raise GeoRestrictedError(msg, countries=countries) # Methods for following #608 @staticmethod def url_result(url, ie=None, video_id=None, video_title=None): """Returns a URL that points to a page that should be processed""" # TODO: ie should be the class used for getting the info video_info = {'_type': 'url', 'url': url, 'ie_key': ie} if video_id is not None: video_info['id'] = video_id if video_title is not None: video_info['title'] = video_title return video_info def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None): urls = orderedSet( self.url_result(self._proto_relative_url(getter(m) if getter else m), ie) for m in matches) return self.playlist_result( urls, playlist_id=playlist_id, playlist_title=playlist_title) @staticmethod def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None): """Returns a playlist""" video_info = {'_type': 'playlist', 'entries': entries} if playlist_id: video_info['id'] = playlist_id if playlist_title: video_info['title'] = playlist_title if playlist_description: video_info['description'] = playlist_description return video_info def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None): """ Perform a regex search on the given string, using a single or a list of patterns returning the first matching group. In case of failure return a default value, report a warning or raise a RegexNotFoundError, depending on fatal, specifying the field name.
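Example (hypothetical pattern and webpage contents): self._search_regex(r'data-id="(\d+)"', webpage, 'video id') returns '123' for a page containing data-id="123"; on no match it returns default if one was given, raises RegexNotFoundError if fatal, and otherwise reports a warning and returns None.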
""" if isinstance(pattern, (str, compat_str, compiled_regex_type)): mobj = re.search(pattern, string, flags) else: for p in pattern: mobj = re.search(p, string, flags) if mobj: break if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty(): _name = '\033[0;34m%s\033[0m' % name else: _name = name if mobj: if group is None: # return the first matching group return next(g for g in mobj.groups() if g is not None) else: return mobj.group(group) elif default is not NO_DEFAULT: return default elif fatal: raise RegexNotFoundError('Unable to extract %s' % _name) else: self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message()) return None def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None): """ Like _search_regex, but strips HTML tags and unescapes entities. """ res = self._search_regex(pattern, string, name, default, fatal, flags, group) if res: return clean_html(res).strip() else: return res def _get_netrc_login_info(self, netrc_machine=None): username = None password = None netrc_machine = netrc_machine or self._NETRC_MACHINE if self._downloader.params.get('usenetrc', False): try: info = netrc.netrc().authenticators(netrc_machine) if info is not None: username = info[0] password = info[2] else: raise netrc.NetrcParseError( 'No authenticators for %s' % netrc_machine) except (IOError, netrc.NetrcParseError) as err: self._downloader.report_warning( 'parsing .netrc: %s' % error_to_compat_str(err)) return username, password def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None): """ Get the login info as (username, password) First look for the manually specified credentials using username_option and password_option as keys in params dictionary. If no such credentials available look in the netrc file using the netrc_machine or _NETRC_MACHINE value. 
If there's no info available, return (None, None) """ if self._downloader is None: return (None, None) downloader_params = self._downloader.params # Attempt to use provided username and password or .netrc data if downloader_params.get(username_option) is not None: username = downloader_params[username_option] password = downloader_params[password_option] else: username, password = self._get_netrc_login_info(netrc_machine) return username, password def _get_tfa_info(self, note='two-factor verification code'): """ Get the two-factor authentication info TODO - asking the user will be required for sms/phone verify currently just uses the command line option If there's no info available, return None """ if self._downloader is None: return None downloader_params = self._downloader.params if downloader_params.get('twofactor') is not None: return downloader_params['twofactor'] return compat_getpass('Type %s and press [Return]: ' % note) # Helper functions for extracting OpenGraph info @staticmethod def _og_regexes(prop): content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))' property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)' % {'prop': re.escape(prop)}) template = r'<meta[^>]+?%s[^>]+?%s' return [ template % (property_re, content_re), template % (content_re, property_re), ] @staticmethod def _meta_regex(prop): return r'''(?isx)<meta (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1) [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop) def _og_search_property(self, prop, html, name=None, **kargs): if not isinstance(prop, (list, tuple)): prop = [prop] if name is None: name = 'OpenGraph %s' % prop[0] og_regexes = [] for p in prop: og_regexes.extend(self._og_regexes(p)) escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs) if escaped is None: return None return unescapeHTML(escaped) def _og_search_thumbnail(self, html, **kargs): return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs) def _og_search_description(self, html, **kargs): return self._og_search_property('description', html, fatal=False, **kargs) def _og_search_title(self, html, **kargs): return self._og_search_property('title', html, **kargs) def _og_search_video_url(self, html, name='video url', secure=True, **kargs): regexes = self._og_regexes('video') + self._og_regexes('video:url') if secure: regexes = self._og_regexes('video:secure_url') + regexes return self._html_search_regex(regexes, html, name, **kargs) def _og_search_url(self, html, **kargs): return self._og_search_property('url', html, **kargs) def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs): if not isinstance(name, (list, tuple)): name = [name] if display_name is None: display_name = name[0] return self._html_search_regex( [self._meta_regex(n) for n in name], html, display_name, fatal=fatal, group='content', **kwargs) def _dc_search_uploader(self, html): return self._html_search_meta('dc.creator', html, 'uploader') def _rta_search(self, html): # See http://www.rtalabel.org/index.php?content=howtofaq#single if re.search(r'(?ix)<meta\s+name="rating"\s+' r' content="RTA-5042-1996-1400-1577-RTA"', html): return 18 return 0 def _media_rating_search(self, html): # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/ rating = self._html_search_meta('rating', html) if not rating: return None RATING_TABLE = { 'safe for kids': 0, 'general': 8, '14 years': 14, 'mature': 17, 'restricted': 19, 
} return RATING_TABLE.get(rating.lower()) def _family_friendly_search(self, html): # See http://schema.org/VideoObject family_friendly = self._html_search_meta( 'isFamilyFriendly', html, default=None) if not family_friendly: return None RATING_TABLE = { '1': 0, 'true': 0, '0': 18, 'false': 18, } return RATING_TABLE.get(family_friendly.lower()) def _twitter_search_player(self, html): return self._html_search_meta('twitter:player', html, 'twitter card player') def _search_json_ld(self, html, video_id, expected_type=None, **kwargs): json_ld = self._search_regex( r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>', html, 'JSON-LD', group='json_ld', **kwargs) default = kwargs.get('default', NO_DEFAULT) if not json_ld: return default if default is not NO_DEFAULT else {} # JSON-LD may be malformed and thus `fatal` should be respected. # At the same time `default` may be passed that assumes `fatal=False` # for _search_regex. Let's simulate the same behavior here as well. fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type) def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None): if isinstance(json_ld, compat_str): json_ld = self._parse_json(json_ld, video_id, fatal=fatal) if not json_ld: return {} info = {} if not isinstance(json_ld, (list, tuple, dict)): return info if isinstance(json_ld, dict): json_ld = [json_ld] def extract_video_object(e): assert e['@type'] == 'VideoObject' info.update({ 'url': e.get('contentUrl'), 'title': unescapeHTML(e.get('name')), 'description': unescapeHTML(e.get('description')), 'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'), 'duration': parse_duration(e.get('duration')), 'timestamp': unified_timestamp(e.get('uploadDate')), 'filesize': float_or_none(e.get('contentSize')), 'tbr': int_or_none(e.get('bitrate')), 'width': int_or_none(e.get('width')), 'height': int_or_none(e.get('height')), 'view_count': int_or_none(e.get('interactionCount')), }) for e in json_ld: if e.get('@context') == 'http://schema.org': item_type = e.get('@type') if expected_type is not None and expected_type != item_type: return info if item_type in ('TVEpisode', 'Episode'): info.update({ 'episode': unescapeHTML(e.get('name')), 'episode_number': int_or_none(e.get('episodeNumber')), 'description': unescapeHTML(e.get('description')), }) part_of_season = e.get('partOfSeason') if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'): info['season_number'] = int_or_none(part_of_season.get('seasonNumber')) part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries') if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'): info['series'] = unescapeHTML(part_of_series.get('name')) elif item_type == 'Article': info.update({ 'timestamp': parse_iso8601(e.get('datePublished')), 'title': unescapeHTML(e.get('headline')), 'description': unescapeHTML(e.get('articleBody')), }) elif item_type == 'VideoObject': extract_video_object(e) continue video = e.get('video') if isinstance(video, dict) and video.get('@type') == 'VideoObject': extract_video_object(video) break return dict((k, v) for k, v in info.items() if v is not None) @staticmethod def _hidden_inputs(html): html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html) hidden_inputs = {} for input in re.findall(r'(?i)(<input[^>]+>)', html): attrs = extract_attributes(input) if not input: continue 
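# For example (hypothetical markup), an input such as
#     <input type="hidden" name="token" value="abc123">
# ends up as {'token': 'abc123'}; inputs of any other type are
# skipped just below.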
if attrs.get('type') not in ('hidden', 'submit'): continue name = attrs.get('name') or attrs.get('id') value = attrs.get('value') if name and value is not None: hidden_inputs[name] = value return hidden_inputs def _form_hidden_inputs(self, form_id, html): form = self._search_regex( r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id, html, '%s form' % form_id, group='form') return self._hidden_inputs(form) def _sort_formats(self, formats, field_preference=None): if not formats: raise ExtractorError('No video formats found') for f in formats: # Automatically determine tbr when missing based on abr and vbr (improves # formats sorting in some cases) if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None: f['tbr'] = f['abr'] + f['vbr'] def _formats_key(f): # TODO remove the following workaround from ..utils import determine_ext if not f.get('ext') and 'url' in f: f['ext'] = determine_ext(f['url']) if isinstance(field_preference, (list, tuple)): return tuple( f.get(field) if f.get(field) is not None else ('' if field == 'format_id' else -1) for field in field_preference) preference = f.get('preference') if preference is None: preference = 0 if f.get('ext') in ['f4f', 'f4m']: # Not yet supported preference -= 0.5 protocol = f.get('protocol') or determine_protocol(f) proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1) if f.get('vcodec') == 'none': # audio only preference -= 50 if self._downloader.params.get('prefer_free_formats'): ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus'] else: ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a'] ext_preference = 0 try: audio_ext_preference = ORDER.index(f['ext']) except ValueError: audio_ext_preference = -1 else: if f.get('acodec') == 'none': # video only preference -= 40 if self._downloader.params.get('prefer_free_formats'): ORDER = ['flv', 'mp4', 'webm'] else: ORDER = ['webm', 'flv', 'mp4'] try: ext_preference = ORDER.index(f['ext']) except ValueError: ext_preference = -1 audio_ext_preference = 0 return ( preference, f.get('language_preference') if f.get('language_preference') is not None else -1, f.get('quality') if f.get('quality') is not None else -1, f.get('tbr') if f.get('tbr') is not None else -1, f.get('filesize') if f.get('filesize') is not None else -1, f.get('vbr') if f.get('vbr') is not None else -1, f.get('height') if f.get('height') is not None else -1, f.get('width') if f.get('width') is not None else -1, proto_preference, ext_preference, f.get('abr') if f.get('abr') is not None else -1, audio_ext_preference, f.get('fps') if f.get('fps') is not None else -1, f.get('filesize_approx') if f.get('filesize_approx') is not None else -1, f.get('source_preference') if f.get('source_preference') is not None else -1, f.get('format_id') if f.get('format_id') is not None else '', ) formats.sort(key=_formats_key) def _check_formats(self, formats, video_id): if formats: formats[:] = filter( lambda f: self._is_valid_url( f['url'], video_id, item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'), formats) @staticmethod def _remove_duplicate_formats(formats): format_urls = set() unique_formats = [] for f in formats: if f['url'] not in format_urls: format_urls.add(f['url']) unique_formats.append(f) formats[:] = unique_formats def _is_valid_url(self, url, video_id, item='video', headers={}): url = self._proto_relative_url(url, scheme='http:') # For now assume non HTTP(S) URLs always valid if not (url.startswith('http://') or 
url.startswith('https://')): return True try: self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers) return True except ExtractorError as e: if isinstance(e.cause, compat_urllib_error.URLError): self.to_screen( '%s: %s URL is invalid, skipping' % (video_id, item)) return False raise def http_scheme(self): """ Either "http:" or "https:", depending on the user's preferences """ return ( 'http:' if self._downloader.params.get('prefer_insecure', False) else 'https:') def _proto_relative_url(self, url, scheme=None): if url is None: return url if url.startswith('//'): if scheme is None: scheme = self.http_scheme() return scheme + url else: return url def _sleep(self, timeout, video_id, msg_template=None): if msg_template is None: msg_template = '%(video_id)s: Waiting for %(timeout)s seconds' msg = msg_template % {'video_id': video_id, 'timeout': timeout} self.to_screen(msg) time.sleep(timeout) def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None, transform_source=lambda s: fix_xml_ampersands(s).strip(), fatal=True, m3u8_id=None): manifest = self._download_xml( manifest_url, video_id, 'Downloading f4m manifest', 'Unable to download f4m manifest', # Some manifests may be malformed, e.g. prosiebensat1 generated manifests # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244) transform_source=transform_source, fatal=fatal) if manifest is False: return [] return self._parse_f4m_formats( manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id, transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id) def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None, transform_source=lambda s: fix_xml_ampersands(s).strip(), fatal=True, m3u8_id=None): # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0') if akamai_pv is not None and ';' in akamai_pv.text: playerVerificationChallenge = akamai_pv.text.split(';')[0] if playerVerificationChallenge.strip() != '': return [] formats = [] manifest_version = '1.0' media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media') if not media_nodes: manifest_version = '2.0' media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media') # Remove unsupported DRM protected media from final formats # rendition (see https://github.com/rg3/youtube-dl/issues/8573). media_nodes = remove_encrypted_media(media_nodes) if not media_nodes: return formats manifest_base_url = get_base_url(manifest) bootstrap_info = xpath_element( manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'], 'bootstrap info', default=None) vcodec = None mime_type = xpath_text( manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'], 'base URL', default=None) if mime_type and mime_type.startswith('audio/'): vcodec = 'none' for i, media_el in enumerate(media_nodes): tbr = int_or_none(media_el.attrib.get('bitrate')) width = int_or_none(media_el.attrib.get('width')) height = int_or_none(media_el.attrib.get('height')) format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])) # If <bootstrapInfo> is present, the specified f4m is a # stream-level manifest, and only set-level manifests may refer to # external resources. 
See section 11.4 and section 4 of F4M spec if bootstrap_info is None: media_url = None # @href is introduced in 2.0, see section 11.6 of F4M spec if manifest_version == '2.0': media_url = media_el.attrib.get('href') if media_url is None: media_url = media_el.attrib.get('url') if not media_url: continue manifest_url = ( media_url if media_url.startswith('http://') or media_url.startswith('https://') else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url)) # If media_url is itself a f4m manifest do the recursive extraction # since bitrates in parent manifest (this one) and media_url manifest # may differ leading to inability to resolve the format by requested # bitrate in f4m downloader ext = determine_ext(manifest_url) if ext == 'f4m': f4m_formats = self._extract_f4m_formats( manifest_url, video_id, preference=preference, f4m_id=f4m_id, transform_source=transform_source, fatal=fatal) # Sometimes stream-level manifest contains single media entry that # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player). # At the same time parent's media entry in set-level manifest may # contain it. We will copy it from parent in such cases. if len(f4m_formats) == 1: f = f4m_formats[0] f.update({ 'tbr': f.get('tbr') or tbr, 'width': f.get('width') or width, 'height': f.get('height') or height, 'format_id': f.get('format_id') if not tbr else format_id, 'vcodec': vcodec, }) formats.extend(f4m_formats) continue elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( manifest_url, video_id, 'mp4', preference=preference, m3u8_id=m3u8_id, fatal=fatal)) continue formats.append({ 'format_id': format_id, 'url': manifest_url, 'manifest_url': manifest_url, 'ext': 'flv' if bootstrap_info is not None else None, 'protocol': 'f4m', 'tbr': tbr, 'width': width, 'height': height, 'vcodec': vcodec, 'preference': preference, }) return formats def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None): return { 'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])), 'url': m3u8_url, 'ext': ext, 'protocol': 'm3u8', 'preference': preference - 100 if preference else -100, 'resolution': 'multiple', 'format_note': 'Quality selection URL', } def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None, entry_protocol='m3u8', preference=None, m3u8_id=None, note=None, errnote=None, fatal=True, live=False): res = self._download_webpage_handle( m3u8_url, video_id, note=note or 'Downloading m3u8 information', errnote=errnote or 'Failed to download m3u8 information', fatal=fatal) if res is False: return [] m3u8_doc, urlh = res m3u8_url = urlh.geturl() return self._parse_m3u8_formats( m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol, preference=preference, m3u8_id=m3u8_id, live=live) def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None, entry_protocol='m3u8', preference=None, m3u8_id=None, live=False): if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access return [] if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay return [] formats = [] format_url = lambda u: ( u if re.match(r'^https?://', u) else compat_urlparse.urljoin(m3u8_url, u)) # References: # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21 # 2. https://github.com/rg3/youtube-dl/issues/12211 # We should try extracting formats only from master playlists [1, 4.3.4], # i.e. playlists that describe available qualities. 
On the other hand # media playlists [1, 4.3.3] should be returned as is since they contain # just the media without quality renditions. # Fortunately, a master playlist can be easily distinguished from a media # playlist based on the availability of particular tags. As of [1, 4.3.3, 4.3.4] # master playlist tags MUST NOT appear in a media playlist and vice versa. # As of [1, 4.3.3.1] the #EXT-X-TARGETDURATION tag is REQUIRED for every # media playlist and MUST NOT appear in a master playlist, thus we can # clearly detect a media playlist with this criterion. if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is return [{ 'url': m3u8_url, 'format_id': m3u8_id, 'ext': ext, 'protocol': entry_protocol, 'preference': preference, }] groups = {} last_stream_inf = {} def extract_media(x_media_line): media = parse_m3u8_attributes(x_media_line) # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME') if not (media_type and group_id and name): return groups.setdefault(group_id, []).append(media) if media_type not in ('VIDEO', 'AUDIO'): return media_url = media.get('URI') if media_url: format_id = [] for v in (m3u8_id, group_id, name): if v: format_id.append(v) f = { 'format_id': '-'.join(format_id), 'url': format_url(media_url), 'manifest_url': m3u8_url, 'language': media.get('LANGUAGE'), 'ext': ext, 'protocol': entry_protocol, 'preference': preference, } if media_type == 'AUDIO': f['vcodec'] = 'none' formats.append(f) def build_stream_name(): # Although the specification does not mention the NAME attribute for # the EXT-X-STREAM-INF tag, it still sometimes may be present (see [1] # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats) # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015 stream_name = last_stream_inf.get('NAME') if stream_name: return stream_name # If there is no NAME in EXT-X-STREAM-INF it will be obtained # from the corresponding rendition group stream_group_id = last_stream_inf.get('VIDEO') if not stream_group_id: return stream_group = groups.get(stream_group_id) if not stream_group: return stream_group_id rendition = stream_group[0] return rendition.get('NAME') or stream_group_id for line in m3u8_doc.splitlines(): if line.startswith('#EXT-X-STREAM-INF:'): last_stream_inf = parse_m3u8_attributes(line) elif line.startswith('#EXT-X-MEDIA:'): extract_media(line) elif line.startswith('#') or not line.strip(): continue else: tbr = float_or_none( last_stream_inf.get('AVERAGE-BANDWIDTH') or last_stream_inf.get('BANDWIDTH'), scale=1000) format_id = [] if m3u8_id: format_id.append(m3u8_id) stream_name = build_stream_name() # Bandwidth of live streams may differ over time thus making # format_id unpredictable. So it's better to keep provided # format_id intact.
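# For example (hypothetical values), with m3u8_id='hls' and a stream
# NAME of '720p' the resulting format_id is 'hls-720p'; for live
# streams only 'hls' is kept, since a bandwidth-derived suffix could
# change on every playlist refresh.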
if not live: format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats))) manifest_url = format_url(line.strip()) f = { 'format_id': '-'.join(format_id), 'url': manifest_url, 'manifest_url': m3u8_url, 'tbr': tbr, 'ext': ext, 'fps': float_or_none(last_stream_inf.get('FRAME-RATE')), 'protocol': entry_protocol, 'preference': preference, } resolution = last_stream_inf.get('RESOLUTION') if resolution: mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution) if mobj: f['width'] = int(mobj.group('width')) f['height'] = int(mobj.group('height')) # Unified Streaming Platform mobj = re.search( r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url']) if mobj: abr, vbr = mobj.groups() abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000) f.update({ 'vbr': vbr, 'abr': abr, }) codecs = parse_codecs(last_stream_inf.get('CODECS')) f.update(codecs) audio_group_id = last_stream_inf.get('AUDIO') # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which # references a rendition group MUST have a CODECS attribute. # However, this is not always respected. For example, [2] # contains an EXT-X-STREAM-INF tag which references an AUDIO # rendition group but does not have CODECS and, despite # referencing an audio group, it represents # a complete (with audio and video) format. So, for such cases # we will ignore references to rendition groups and treat them # as complete formats. if audio_group_id and codecs and f.get('vcodec') != 'none': audio_group = groups.get(audio_group_id) if audio_group and audio_group[0].get('URI'): # TODO: update acodec for audio only formats with # the same GROUP-ID f['acodec'] = 'none' formats.append(f) last_stream_inf = {} return formats @staticmethod def _xpath_ns(path, namespace=None): if not namespace: return path out = [] for c in path.split('/'): if not c or c == '.': out.append(c) else: out.append('{%s}%s' % (namespace, c)) return '/'.join(out) def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None): smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source) if smil is False: assert not fatal return [] namespace = self._parse_smil_namespace(smil) return self._parse_smil_formats( smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params) def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None): smil = self._download_smil(smil_url, video_id, fatal=fatal) if smil is False: return {} return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params) def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None): return self._download_xml( smil_url, video_id, 'Downloading SMIL file', 'Unable to download SMIL file', fatal=fatal, transform_source=transform_source) def _parse_smil(self, smil, smil_url, video_id, f4m_params=None): namespace = self._parse_smil_namespace(smil) formats = self._parse_smil_formats( smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params) subtitles = self._parse_smil_subtitles(smil, namespace=namespace) video_id = os.path.splitext(url_basename(smil_url))[0] title = None description = None upload_date = None for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): name = meta.attrib.get('name') content = meta.attrib.get('content') if not name or not content: continue if not title and name == 'title': title = content elif not description and name in ('description', 'abstract'): description = content elif not upload_date and name == 'date':
upload_date = unified_strdate(content) thumbnails = [{ 'id': image.get('type'), 'url': image.get('src'), 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')] return { 'id': video_id, 'title': title or video_id, 'description': description, 'upload_date': upload_date, 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, } def _parse_smil_namespace(self, smil): return self._search_regex( r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None) def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): base = smil_url for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): b = meta.get('base') or meta.get('httpBase') if b: base = b break formats = [] rtmp_count = 0 http_count = 0 m3u8_count = 0 srcs = [] media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace)) for medium in media: src = medium.get('src') if not src or src in srcs: continue srcs.append(src) bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000) filesize = int_or_none(medium.get('size') or medium.get('fileSize')) width = int_or_none(medium.get('width')) height = int_or_none(medium.get('height')) proto = medium.get('proto') ext = medium.get('ext') src_ext = determine_ext(src) streamer = medium.get('streamer') or base if proto == 'rtmp' or streamer.startswith('rtmp'): rtmp_count += 1 formats.append({ 'url': streamer, 'play_path': src, 'ext': 'flv', 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate), 'tbr': bitrate, 'filesize': filesize, 'width': width, 'height': height, }) if transform_rtmp_url: streamer, src = transform_rtmp_url(streamer, src) formats[-1].update({ 'url': streamer, 'play_path': src, }) continue src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src) src_url = src_url.strip() if proto == 'm3u8' or src_ext == 'm3u8': m3u8_formats = self._extract_m3u8_formats( src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False) if len(m3u8_formats) == 1: m3u8_count += 1 m3u8_formats[0].update({ 'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate), 'tbr': bitrate, 'width': width, 'height': height, }) formats.extend(m3u8_formats) continue if src_ext == 'f4m': f4m_url = src_url if not f4m_params: f4m_params = { 'hdcore': '3.2.0', 'plugin': 'flowplayer-3.2.0.1', } f4m_url += '&' if '?' in f4m_url else '?' 
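# Resulting URL sketch (hypothetical example, default f4m_params):
# 'http://example.com/video.f4m' becomes e.g.
# 'http://example.com/video.f4m?hdcore=3.2.0&plugin=flowplayer-3.2.0.1'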
f4m_url += compat_urllib_parse_urlencode(f4m_params) formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)) continue if src_url.startswith('http') and self._is_valid_url(src, video_id): http_count += 1 formats.append({ 'url': src_url, 'ext': ext or src_ext or 'flv', 'format_id': 'http-%d' % (bitrate or http_count), 'tbr': bitrate, 'filesize': filesize, 'width': width, 'height': height, }) continue return formats def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'): urls = [] subtitles = {} for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))): src = textstream.get('src') if not src or src in urls: continue urls.append(src) ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src) lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang subtitles.setdefault(lang, []).append({ 'url': src, 'ext': ext, }) return subtitles def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True): xspf = self._download_xml( playlist_url, playlist_id, 'Downloading xspf playlist', 'Unable to download xspf manifest', fatal=fatal) if xspf is False: return [] return self._parse_xspf(xspf, playlist_id) def _parse_xspf(self, playlist, playlist_id): NS_MAP = { 'xspf': 'http://xspf.org/ns/0/', 's1': 'http://static.streamone.nl/player/ns/0', } entries = [] for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)): title = xpath_text( track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id) description = xpath_text( track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description') thumbnail = xpath_text( track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail') duration = float_or_none( xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000) formats = [{ 'url': location.text, 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)), 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))), 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))), } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))] self._sort_formats(formats) entries.append({ 'id': playlist_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, }) return entries def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}): res = self._download_webpage_handle( mpd_url, video_id, note=note or 'Downloading MPD manifest', errnote=errnote or 'Failed to download MPD manifest', fatal=fatal) if res is False: return [] mpd, urlh = res mpd_base_url = base_url(urlh.geturl()) return self._parse_mpd_formats( compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict, mpd_url=mpd_url) def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None): """ Parse formats from MPD manifest. References: 1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E), http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip 2.
https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP """ if mpd_doc.get('type') == 'dynamic': return [] namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None) def _add_ns(path): return self._xpath_ns(path, namespace) def is_drm_protected(element): return element.find(_add_ns('ContentProtection')) is not None def extract_multisegment_info(element, ms_parent_info): ms_info = ms_parent_info.copy() # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some # common attributes and elements. We will only extract relevant # for us. def extract_common(source): segment_timeline = source.find(_add_ns('SegmentTimeline')) if segment_timeline is not None: s_e = segment_timeline.findall(_add_ns('S')) if s_e: ms_info['total_number'] = 0 ms_info['s'] = [] for s in s_e: r = int(s.get('r', 0)) ms_info['total_number'] += 1 + r ms_info['s'].append({ 't': int(s.get('t', 0)), # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60]) 'd': int(s.attrib['d']), 'r': r, }) start_number = source.get('startNumber') if start_number: ms_info['start_number'] = int(start_number) timescale = source.get('timescale') if timescale: ms_info['timescale'] = int(timescale) segment_duration = source.get('duration') if segment_duration: ms_info['segment_duration'] = float(segment_duration) def extract_Initialization(source): initialization = source.find(_add_ns('Initialization')) if initialization is not None: ms_info['initialization_url'] = initialization.attrib['sourceURL'] segment_list = element.find(_add_ns('SegmentList')) if segment_list is not None: extract_common(segment_list) extract_Initialization(segment_list) segment_urls_e = segment_list.findall(_add_ns('SegmentURL')) if segment_urls_e: ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e] else: segment_template = element.find(_add_ns('SegmentTemplate')) if segment_template is not None: extract_common(segment_template) media = segment_template.get('media') if media: ms_info['media'] = media initialization = segment_template.get('initialization') if initialization: ms_info['initialization'] = initialization else: extract_Initialization(segment_template) return ms_info mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration')) formats = [] for period in mpd_doc.findall(_add_ns('Period')): period_duration = parse_duration(period.get('duration')) or mpd_duration period_ms_info = extract_multisegment_info(period, { 'start_number': 1, 'timescale': 1, }) for adaptation_set in period.findall(_add_ns('AdaptationSet')): if is_drm_protected(adaptation_set): continue adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info) for representation in adaptation_set.findall(_add_ns('Representation')): if is_drm_protected(representation): continue representation_attrib = adaptation_set.attrib.copy() representation_attrib.update(representation.attrib) # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory mime_type = representation_attrib['mimeType'] content_type = mime_type.split('/')[0] if content_type == 'text': # TODO implement WebVTT downloading pass elif content_type in ('video', 'audio'): base_url = '' for element in (representation, adaptation_set, period, mpd_doc): base_url_e = element.find(_add_ns('BaseURL')) if base_url_e is not None: base_url = base_url_e.text + base_url if re.match(r'^https?://', base_url): break if mpd_base_url and not re.match(r'^https?://', base_url): if not mpd_base_url.endswith('/') and not base_url.startswith('/'): 
mpd_base_url += '/' base_url = mpd_base_url + base_url representation_id = representation_attrib.get('id') lang = representation_attrib.get('lang') url_el = representation.find(_add_ns('BaseURL')) filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None) bandwidth = int_or_none(representation_attrib.get('bandwidth')) f = { 'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id, 'url': base_url, 'manifest_url': mpd_url, 'ext': mimetype2ext(mime_type), 'width': int_or_none(representation_attrib.get('width')), 'height': int_or_none(representation_attrib.get('height')), 'tbr': float_or_none(bandwidth, 1000), 'asr': int_or_none(representation_attrib.get('audioSamplingRate')), 'fps': int_or_none(representation_attrib.get('frameRate')), 'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None, 'format_note': 'DASH %s' % content_type, 'filesize': filesize, } f.update(parse_codecs(representation_attrib.get('codecs'))) representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info) def prepare_template(template_name, identifiers): t = representation_ms_info[template_name] t = t.replace('$RepresentationID$', representation_id) t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t) t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t) t = t.replace('$$', '$') # str.replace returns a new string, so the result must be assigned return t # @initialization is a regular template like @media one # so it should be handled just the same way (see # https://github.com/rg3/youtube-dl/issues/11605) if 'initialization' in representation_ms_info: initialization_template = prepare_template( 'initialization', # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and # $Time$ shall not be included for @initialization thus # only $Bandwidth$ remains ('Bandwidth', )) representation_ms_info['initialization_url'] = initialization_template % { 'Bandwidth': bandwidth, } def location_key(location): return 'url' if re.match(r'^https?://', location) else 'path' if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info: media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time')) media_location_key = location_key(media_template) # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$ # can't be used at the same time if '%(Number' in media_template and 's' not in representation_ms_info: segment_duration = None if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info: segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale']) representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration)) representation_ms_info['fragments'] = [{ media_location_key: media_template % { 'Number': segment_number, 'Bandwidth': bandwidth, }, 'duration': segment_duration, } for segment_number in range( representation_ms_info['start_number'], representation_ms_info['total_number'] + representation_ms_info['start_number'])] else: # $Number*$ or $Time$ in media template with S list available # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411 representation_ms_info['fragments'] = [] segment_time = 0 segment_d = None segment_number = representation_ms_info['start_number'] def add_segment_url(): segment_url = media_template % { 'Time': segment_time,
'Bandwidth': bandwidth, 'Number': segment_number, } representation_ms_info['fragments'].append({ media_location_key: segment_url, 'duration': float_or_none(segment_d, representation_ms_info['timescale']), }) for num, s in enumerate(representation_ms_info['s']): segment_time = s.get('t') or segment_time segment_d = s['d'] add_segment_url() segment_number += 1 for r in range(s.get('r', 0)): segment_time += segment_d add_segment_url() segment_number += 1 segment_time += segment_d elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info: # No media template # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI # or any YouTube dashsegments video fragments = [] segment_index = 0 timescale = representation_ms_info['timescale'] for s in representation_ms_info['s']: duration = float_or_none(s['d'], timescale) for r in range(s.get('r', 0) + 1): segment_uri = representation_ms_info['segment_urls'][segment_index] fragments.append({ location_key(segment_uri): segment_uri, 'duration': duration, }) segment_index += 1 representation_ms_info['fragments'] = fragments elif 'segment_urls' in representation_ms_info: # Segment URLs with no SegmentTimeline # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091 # https://github.com/rg3/youtube-dl/pull/14844 fragments = [] segment_duration = float_or_none( representation_ms_info['segment_duration'], representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None for segment_url in representation_ms_info['segment_urls']: fragment = { location_key(segment_url): segment_url, } if segment_duration: fragment['duration'] = segment_duration fragments.append(fragment) representation_ms_info['fragments'] = fragments # NB: MPD manifest may contain direct URLs to unfragmented media. # No fragments key is present in this case. if 'fragments' in representation_ms_info: f.update({ 'fragment_base_url': base_url, 'fragments': [], 'protocol': 'http_dash_segments', }) if 'initialization_url' in representation_ms_info: initialization_url = representation_ms_info['initialization_url'] if not f.get('url'): f['url'] = initialization_url f['fragments'].append({location_key(initialization_url): initialization_url}) f['fragments'].extend(representation_ms_info['fragments']) # According to [1, 5.3.5.2, Table 7, page 35] @id of Representation # is not necessarily unique within a Period thus formats with # the same `format_id` are quite possible. There are numerous examples # of such manifests (see https://github.com/rg3/youtube-dl/issues/15111, # https://github.com/rg3/youtube-dl/issues/13919) full_info = formats_dict.get(representation_id, {}).copy() full_info.update(f) formats.append(full_info) else: self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type) return formats def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True): res = self._download_webpage_handle( ism_url, video_id, note=note or 'Downloading ISM manifest', errnote=errnote or 'Failed to download ISM manifest', fatal=fatal) if res is False: return [] ism, urlh = res return self._parse_ism_formats( compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id) def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None): """ Parse formats from ISM manifest. References: 1. 
[MS-SSTR]: Smooth Streaming Protocol, https://msdn.microsoft.com/en-us/library/ff469518.aspx """ if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None: return [] duration = int(ism_doc.attrib['Duration']) timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000 formats = [] for stream in ism_doc.findall('StreamIndex'): stream_type = stream.get('Type') if stream_type not in ('video', 'audio'): continue url_pattern = stream.attrib['Url'] stream_timescale = int_or_none(stream.get('TimeScale')) or timescale stream_name = stream.get('Name') for track in stream.findall('QualityLevel'): fourcc = track.get('FourCC') # TODO: add support for WVC1 and WMAP if fourcc not in ('H264', 'AVC1', 'AACL'): self.report_warning('%s is not a supported codec' % fourcc) continue tbr = int(track.attrib['Bitrate']) // 1000 # [1] does not mention Width and Height attributes. However, # they're often present while MaxWidth and MaxHeight are # missing, so should be used as fallbacks width = int_or_none(track.get('MaxWidth') or track.get('Width')) height = int_or_none(track.get('MaxHeight') or track.get('Height')) sampling_rate = int_or_none(track.get('SamplingRate')) track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern) track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern) fragments = [] fragment_ctx = { 'time': 0, } stream_fragments = stream.findall('c') for stream_fragment_index, stream_fragment in enumerate(stream_fragments): fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time'] fragment_repeat = int_or_none(stream_fragment.get('r')) or 1 fragment_ctx['duration'] = int_or_none(stream_fragment.get('d')) if not fragment_ctx['duration']: try: next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t']) except IndexError: next_fragment_time = duration fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat for _ in range(fragment_repeat): fragments.append({ 'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern), 'duration': fragment_ctx['duration'] / stream_timescale, }) fragment_ctx['time'] += fragment_ctx['duration'] format_id = [] if ism_id: format_id.append(ism_id) if stream_name: format_id.append(stream_name) format_id.append(compat_str(tbr)) formats.append({ 'format_id': '-'.join(format_id), 'url': ism_url, 'manifest_url': ism_url, 'ext': 'ismv' if stream_type == 'video' else 'isma', 'width': width, 'height': height, 'tbr': tbr, 'asr': sampling_rate, 'vcodec': 'none' if stream_type == 'audio' else fourcc, 'acodec': 'none' if stream_type == 'video' else fourcc, 'protocol': 'ism', 'fragments': fragments, '_download_params': { 'duration': duration, 'timescale': stream_timescale, 'width': width or 0, 'height': height or 0, 'fourcc': fourcc, 'codec_private_data': track.get('CodecPrivateData'), 'sampling_rate': sampling_rate, 'channels': int_or_none(track.get('Channels', 2)), 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)), 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)), }, }) return formats def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None): def absolute_url(video_url): return compat_urlparse.urljoin(base_url, video_url) def parse_content_type(content_type): if not content_type: return {} ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type) if ctr: mimetype, codecs 
= ctr.groups() f = parse_codecs(codecs) f['ext'] = mimetype2ext(mimetype) return f return {} def _media_formats(src, cur_media_type, type_info={}): full_url = absolute_url(src) ext = type_info.get('ext') or determine_ext(full_url) if ext == 'm3u8': is_plain_url = False formats = self._extract_m3u8_formats( full_url, video_id, ext='mp4', entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id, preference=preference, fatal=False) elif ext == 'mpd': is_plain_url = False formats = self._extract_mpd_formats( full_url, video_id, mpd_id=mpd_id, fatal=False) else: is_plain_url = True formats = [{ 'url': full_url, 'vcodec': 'none' if cur_media_type == 'audio' else None, }] return is_plain_url, formats entries = [] # amp-video and amp-audio are very similar to their HTML5 counterparts # so we will include them right here (see # https://www.ampproject.org/docs/reference/components/amp-video) media_tags = [(media_tag, media_type, '') for media_tag, media_type in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)] media_tags.extend(re.findall( # We only allow video|audio followed by a whitespace or '>'. # Allowing more characters may result in a significant slowdown (see # https://github.com/rg3/youtube-dl/issues/11979, example URL: # http://www.porntrex.com/maps/videositemap.xml). r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage)) for media_tag, media_type, media_content in media_tags: media_info = { 'formats': [], 'subtitles': {}, } media_attributes = extract_attributes(media_tag) src = media_attributes.get('src') if src: _, formats = _media_formats(src, media_type) media_info['formats'].extend(formats) media_info['thumbnail'] = media_attributes.get('poster') if media_content: for source_tag in re.findall(r'<source[^>]+>', media_content): source_attributes = extract_attributes(source_tag) src = source_attributes.get('src') if not src: continue f = parse_content_type(source_attributes.get('type')) is_plain_url, formats = _media_formats(src, media_type, f) if is_plain_url: # res attribute is not standard but seen several times # in the wild f.update({ 'height': int_or_none(source_attributes.get('res')), 'format_id': source_attributes.get('label'), }) f.update(formats[0]) media_info['formats'].append(f) else: media_info['formats'].extend(formats) for track_tag in re.findall(r'<track[^>]+>', media_content): track_attributes = extract_attributes(track_tag) kind = track_attributes.get('kind') if not kind or kind in ('subtitles', 'captions'): src = track_attributes.get('src') if not src: continue lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label') media_info['subtitles'].setdefault(lang, []).append({ 'url': absolute_url(src), }) if media_info['formats'] or media_info['subtitles']: entries.append(media_info) return entries def _extract_akamai_formats(self, manifest_url, video_id, hosts={}): formats = [] hdcore_sign = 'hdcore=3.7.0' f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m') hds_host = hosts.get('hds') if hds_host: f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url) if 'hdcore=' not in f4m_url: f4m_url += ('&' if '?'
in f4m_url else '?') + hdcore_sign f4m_formats = self._extract_f4m_formats( f4m_url, video_id, f4m_id='hds', fatal=False) for entry in f4m_formats: entry.update({'extra_param_to_segment_url': hdcore_sign}) formats.extend(f4m_formats) m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8') hls_host = hosts.get('hls') if hls_host: m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url) formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) return formats def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]): query = compat_urlparse.urlparse(url).query url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url) url_base = self._search_regex( r'(?:(?:https?|rtmp|rtsp):)?(//[^?]+)', url, 'format url') http_base_url = '%s:%s' % ('http', url_base) formats = [] def manifest_url(manifest): m_url = '%s/%s' % (http_base_url, manifest) if query: m_url += '?%s' % query return m_url if 'm3u8' not in skip_protocols: formats.extend(self._extract_m3u8_formats( manifest_url('playlist.m3u8'), video_id, 'mp4', m3u8_entry_protocol, m3u8_id='hls', fatal=False)) if 'f4m' not in skip_protocols: formats.extend(self._extract_f4m_formats( manifest_url('manifest.f4m'), video_id, f4m_id='hds', fatal=False)) if 'dash' not in skip_protocols: formats.extend(self._extract_mpd_formats( manifest_url('manifest.mpd'), video_id, mpd_id='dash', fatal=False)) if re.search(r'(?:/smil:|\.smil)', url_base): if 'smil' not in skip_protocols: rtmp_formats = self._extract_smil_formats( manifest_url('jwplayer.smil'), video_id, fatal=False) for rtmp_format in rtmp_formats: rtsp_format = rtmp_format.copy() rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path']) del rtsp_format['play_path'] del rtsp_format['ext'] rtsp_format.update({ 'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'), 'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'), 'protocol': 'rtsp', }) formats.extend([rtmp_format, rtsp_format]) else: for protocol in ('rtmp', 'rtsp'): if protocol not in skip_protocols: formats.append({ 'url': '%s:%s' % (protocol, url_base), 'format_id': protocol, 'protocol': protocol, }) return formats def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json): mobj = re.search( r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)', webpage) if mobj: try: jwplayer_data = self._parse_json(mobj.group('options'), video_id=video_id, transform_source=transform_source) except ExtractorError: pass else: if isinstance(jwplayer_data, dict): return jwplayer_data def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs): jwplayer_data = self._find_jwplayer_data( webpage, video_id, transform_source=js_to_json) return self._parse_jwplayer_data( jwplayer_data, video_id, *args, **kwargs) def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True, m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): # JWPlayer backward compatibility: flattened playlists # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96 if 'playlist' not in jwplayer_data: jwplayer_data = {'playlist': [jwplayer_data]} entries = [] # JWPlayer backward compatibility: single playlist item # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10 if not isinstance(jwplayer_data['playlist'], list): 
jwplayer_data['playlist'] = [jwplayer_data['playlist']] for video_data in jwplayer_data['playlist']: # JWPlayer backward compatibility: flattened sources # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35 if 'sources' not in video_data: video_data['sources'] = [video_data] this_video_id = video_id or video_data['mediaid'] formats = self._parse_jwplayer_formats( video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id, mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url) subtitles = {} tracks = video_data.get('tracks') if tracks and isinstance(tracks, list): for track in tracks: if not isinstance(track, dict): continue if track.get('kind') != 'captions': continue track_url = urljoin(base_url, track.get('file')) if not track_url: continue subtitles.setdefault(track.get('label') or 'en', []).append({ 'url': self._proto_relative_url(track_url) }) entry = { 'id': this_video_id, 'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')), 'description': video_data.get('description'), 'thumbnail': self._proto_relative_url(video_data.get('image')), 'timestamp': int_or_none(video_data.get('pubdate')), 'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')), 'subtitles': subtitles, } # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32 if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']): entry.update({ '_type': 'url_transparent', 'url': formats[0]['url'], }) else: self._sort_formats(formats) entry['formats'] = formats entries.append(entry) if len(entries) == 1: return entries[0] else: return self.playlist_result(entries) def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None, m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): urls = [] formats = [] for source in jwplayer_sources_data: if not isinstance(source, dict): continue source_url = self._proto_relative_url(source.get('file')) if not source_url: continue if base_url: source_url = compat_urlparse.urljoin(base_url, source_url) if source_url in urls: continue urls.append(source_url) source_type = source.get('type') or '' ext = mimetype2ext(source_type) or determine_ext(source_url) if source_type == 'hls' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( source_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=m3u8_id, fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( source_url, video_id, mpd_id=mpd_id, fatal=False)) elif ext == 'smil': formats.extend(self._extract_smil_formats( source_url, video_id, fatal=False)) # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67 elif source_type.startswith('audio') or ext in ( 'oga', 'aac', 'mp3', 'mpeg', 'vorbis'): formats.append({ 'url': source_url, 'vcodec': 'none', 'ext': ext, }) else: height = int_or_none(source.get('height')) if height is None: # Often no height is provided but there is a label in # format like "1080p", "720p SD", or 1080. 
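# Illustrative (hypothetical) labels: '1080p' and 1080 both yield height 1080,
# '720p SD' yields 720, while a non-numeric label such as 'HD' does not match
# the regex below and height stays unset.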
height = int_or_none(self._search_regex( r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''), 'height', default=None)) a_format = { 'url': source_url, 'width': int_or_none(source.get('width')), 'height': height, 'tbr': int_or_none(source.get('bitrate')), 'ext': ext, } if source_url.startswith('rtmp'): a_format['ext'] = 'flv' # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as # of jwplayer.flash.swf rtmp_url_parts = re.split( r'((?:mp4|mp3|flv):)', source_url, 1) if len(rtmp_url_parts) == 3: rtmp_url, prefix, play_path = rtmp_url_parts a_format.update({ 'url': rtmp_url, 'play_path': prefix + play_path, }) if rtmp_params: a_format.update(rtmp_params) formats.append(a_format) return formats def _live_title(self, name): """ Generate the title for a live video """ now = datetime.datetime.now() now_str = now.strftime('%Y-%m-%d %H:%M') return name + ' ' + now_str def _int(self, v, name, fatal=False, **kwargs): res = int_or_none(v, **kwargs) if res is None: msg = 'Failed to extract %s: Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res def _float(self, v, name, fatal=False, **kwargs): res = float_or_none(v, **kwargs) if res is None: msg = 'Failed to extract %s: Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res def _set_cookie(self, domain, name, value, expire_time=None, port=None, path='/', secure=False, discard=False, rest={}, **kwargs): cookie = compat_cookiejar.Cookie( 0, name, value, port, port is not None, domain, True, domain.startswith('.'), path, True, secure, expire_time, discard, None, None, rest) self._downloader.cookiejar.set_cookie(cookie) def _get_cookies(self, url): """ Return a compat_cookies.SimpleCookie with the cookies for the url """ req = sanitized_Request(url) self._downloader.cookiejar.add_cookie_header(req) return compat_cookies.SimpleCookie(req.get_header('Cookie')) def get_testcases(self, include_onlymatching=False): t = getattr(self, '_TEST', None) if t: assert not hasattr(self, '_TESTS'), \ '%s has _TEST and _TESTS' % type(self).__name__ tests = [t] else: tests = getattr(self, '_TESTS', []) for t in tests: if not include_onlymatching and t.get('only_matching', False): continue t['name'] = type(self).__name__[:-len('IE')] yield t def is_suitable(self, age_limit): """ Test whether the extractor is generally suitable for the given age limit (i.e. pornographic sites are not, all others usually are) """ any_restricted = False for tc in self.get_testcases(include_onlymatching=False): if tc.get('playlist', []): tc = tc['playlist'][0] is_restricted = age_restricted( tc.get('info_dict', {}).get('age_limit'), age_limit) if not is_restricted: return True any_restricted = any_restricted or is_restricted return not any_restricted def extract_subtitles(self, *args, **kwargs): if (self._downloader.params.get('writesubtitles', False) or self._downloader.params.get('listsubtitles')): return self._get_subtitles(*args, **kwargs) return {} def _get_subtitles(self, *args, **kwargs): raise NotImplementedError('This method must be implemented by subclasses') @staticmethod def _merge_subtitle_items(subtitle_list1, subtitle_list2): """ Merge subtitle items for one language. Items with duplicated URLs will be dropped.
""" list1_urls = set([item['url'] for item in subtitle_list1]) ret = list(subtitle_list1) ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls]) return ret @classmethod def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2): """ Merge two subtitle dictionaries, language by language. """ ret = dict(subtitle_dict1) for lang in subtitle_dict2: ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang]) return ret def extract_automatic_captions(self, *args, **kwargs): if (self._downloader.params.get('writeautomaticsub', False) or self._downloader.params.get('listsubtitles')): return self._get_automatic_captions(*args, **kwargs) return {} def _get_automatic_captions(self, *args, **kwargs): raise NotImplementedError('This method must be implemented by subclasses') def mark_watched(self, *args, **kwargs): if (self._downloader.params.get('mark_watched', False) and (self._get_login_info()[0] is not None or self._downloader.params.get('cookiefile') is not None)): self._mark_watched(*args, **kwargs) def _mark_watched(self, *args, **kwargs): raise NotImplementedError('This method must be implemented by subclasses') def geo_verification_headers(self): headers = {} geo_verification_proxy = self._downloader.params.get('geo_verification_proxy') if geo_verification_proxy: headers['Ytdl-request-proxy'] = geo_verification_proxy return headers def _generic_id(self, url): return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0]) def _generic_title(self, url): return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]) class SearchInfoExtractor(InfoExtractor): """ Base class for paged search queries extractors. They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query} Instances should define _SEARCH_KEY and _MAX_RESULTS. """ @classmethod def _make_valid_url(cls): return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY @classmethod def suitable(cls, url): return re.match(cls._make_valid_url(), url) is not None def _real_extract(self, query): mobj = re.match(self._make_valid_url(), query) if mobj is None: raise ExtractorError('Invalid search query "%s"' % query) prefix = mobj.group('prefix') query = mobj.group('query') if prefix == '': return self._get_n_results(query, 1) elif prefix == 'all': return self._get_n_results(query, self._MAX_RESULTS) else: n = int(prefix) if n <= 0: raise ExtractorError('invalid download number %s for query "%s"' % (n, query)) elif n > self._MAX_RESULTS: self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n)) n = self._MAX_RESULTS return self._get_n_results(query, n) def _get_n_results(self, query, n): """Get a specified number of results for a query""" raise NotImplementedError('This method must be implemented by subclasses') @property def SEARCH_KEY(self): return self._SEARCH_KEY
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-2-fixed/youtube-dl/youtube_dl/extractor/common.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-2-buggy/youtube-dl/youtube_dl/extractor/common.py
youtube-dl-bug-25
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. 
""" try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def 
get_element_by_class(class_name, html): return get_element_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_element_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), value), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() parser.feed(html_element) parser.close() return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. 
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
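# Illustrative examples of the decoding behavior: the HTML5 table below maps
# e.g. 'apos;' (an HTML5-only name) to "'", the numeric branch further down
# turns both '&#233;' and '&#xe9;' into 'é', and an unknown name such as
# '&foobar;' is returned literally by the final fallback.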
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. 
http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk at the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457).
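# An illustrative example (hypothetical values, not taken from the issue above):
# a redirect header such as
#     Location: http://example.com/видео.mp4
# is rewritten by escape_url() below to
#     Location: http://example.com/%D0%B2%D0%B8%D0%B4%D0%B5%D0%BE.mp4
# so that urllib does not choke on the non-ASCII path on the follow-up request.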
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
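# (Illustrative, hypothetical header: "Set-Cookie: name=värde" would become
# "Set-Cookie: name=v%C3%A4rde" under the currently disabled quoting below,
# which Python 2's cookie handling could then process without crashing.)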
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
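# (i.e. file-like objects that exist only in memory or are supplied by a
# console wrapper and expose no OS-level descriptor; without a fileno there
# is no console handle to write to, so fall back to the regular code path)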
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, are missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use.
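For example (illustrative values): smuggle_url('http://example.com/v/1', {'referer': 'http://example.com/page'}) returns the URL with a '#__youtubedl_smuggle=...' fragment appended, and unsmuggle_url() on the result recovers the original URL together with the data dict.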
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return 
s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
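For example (illustrative), check_executable('ffmpeg', ['-version']) returns 'ffmpeg' if ffmpeg can be spawned, and False if it is not installed.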
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
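# Worked example of the slicing arithmetic above (hypothetical numbers):
# with pagesize 20, getslice(30, 35) reaches pagenum 1 (ids 20-39), so
# startv = 30 % 20 = 10 and endv = ((35 - 1) % 20) + 1 = 15, i.e. the
# requested items are page_results[10:15]; len(page_results) + startv is
# then 5 + 10 = 15 < 20, so the check below ends the loop after this page.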
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) INTEGER_TABLE = ( (r'^0[xX][0-9a-fA-F]+', 16), (r'^0+[0-7]+', 8), ) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(0), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| /\*.*?\*/|,(?=\s*[\]}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?| [0-9]+(?=\s*:) ''', fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m': 'f4m', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} split_codecs = list(filter(None, map( lambda s: s.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in split_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr) if not vcodec and not acodec: if len(split_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(split_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes.
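A UTF-8/16/32 BOM, if present, is skipped before testing, so (illustrative samples) b'\xef\xbb\xbf<html>' and b'  <!DOCTYPE html>' are both detected as HTML, while b'\x00\x00\x01\xba' (an MPEG-PS header) is not.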
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 
'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 
'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United 
States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search( r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)", code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': 
length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 
'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
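                # (The overall pattern, dump to a NamedTemporaryFile in the
                # destination directory and then rename over the target, is what
                # makes this write atomic: a hypothetical call such as
                # write_json_file({'id': 'abc'}, 'info.json') either leaves the
                # old file intact or installs the complete new one, never a
                # half-written mixture.)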
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): return get_element_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_element_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), value), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. 
{ 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() parser.feed(html_element) parser.close() return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
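    # Illustrative expectations for this lookup chain (comment-only sketch,
    # values follow from the standard entity tables):
    #     unescapeHTML('&amp;')    -> '&'         (named entity)
    #     unescapeHTML('&#233;')   -> u'\xe9'     (decimal numeric reference)
    #     unescapeHTML('&#xe9;')   -> u'\xe9'     (hexadecimal numeric reference)
    #     unescapeHTML('&bogus;')  -> '&bogus;'   (unknown, kept literally)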
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. 
http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
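        # For example, a hypothetical Location of 'http://example.com/über'
        # becomes 'http://example.com/%C3%BCber' before urllib follows the
        # redirect.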
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
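        # (io.UnsupportedOperation covers objects that expose a fileno()
        # attribute but raise because no real OS file descriptor backs them)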
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
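    A round-trip sketch (hypothetical URL):

        >>> u = smuggle_url('http://example.com/v', {'source': 'test'})
        >>> unsmuggle_url(u)
        ('http://example.com/v', {'source': 'test'})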
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return 
s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
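    (For instance, on a typical POSIX system check_executable('true') returns
    'true', while a binary that cannot be spawned yields False.)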
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
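            # (Sketch with a hypothetical page function: for
            #  OnDemandPagedList(lambda n: range(n * 10, (n + 1) * 10), 10),
            #  getslice(5, 8) fetches only page 0 and returns [5, 6, 7].)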
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) INTEGER_TABLE = ( (r'^(0[xX][0-9a-fA-F]+)\s*:?$', 16), (r'^(0+[0-7]+)\s*:?$', 8), ) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| /\*.*?\*/|,(?=\s*[\]}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?| [0-9]+(?=\s*:) ''', fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as # it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m': 'f4m', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} splited_codecs = list(filter(None, map( lambda str: str.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in splited_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr) if not vcodec and not acodec: if len(splited_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(splited_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
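    Any truthy return value (a regex match on a leading '<' after BOM-aware
    decoding) indicates markup; None suggests the payload is binary media.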
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 
'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 
'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United 
States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do wrapping the socket with socks return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search( r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)", code) obfucasted_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfucasted_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': 
length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels
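# Illustrative sketch (not part of the dataset entry): the Paeth predictor
# that decode_png() above applies for filter type 4, per https://www.w3.org/TR/PNG/.
# The function name and byte values here are made up for demonstration.
def paeth_predictor(a, b, c):
    # a = reconstructed byte to the left, b = byte above, c = byte above-left
    p = a + b - c
    pa, pb, pc = abs(p - a), abs(p - b), abs(p - c)
    if pa <= pb and pa <= pc:
        return a
    if pb <= pc:
        return b
    return c

# Reconstruction adds the predictor to the filtered byte modulo 256:
assert (7 + paeth_predictor(3, 5, 2)) & 0xff == 12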
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-25-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-25-buggy/youtube-dl/youtube_dl/utils.py
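# Illustrative usage sketch of the match_str()/_match_one() filter
# mini-language defined in the utils.py source above. Assumes the module is
# importable as youtube_dl.utils; the info dict below is made up.
from youtube_dl.utils import match_str

info = {'duration': 30, 'filesize': 10 * 1024, 'title': 'foo'}
assert match_str('duration < 60', info)          # numeric comparison
assert match_str('filesize > 5KiB', info)        # size suffixes go through parse_filesize()
assert match_str('!description', info)           # unary '!' means the field is absent
assert match_str('duration < 60 & title', info)  # '&' joins sub-filters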
youtube-dl-bug-4
from __future__ import unicode_literals import json import operator import re from .utils import ( ExtractorError, ) _OPERATORS = [ ('|', operator.or_), ('^', operator.xor), ('&', operator.and_), ('>>', operator.rshift), ('<<', operator.lshift), ('-', operator.sub), ('+', operator.add), ('%', operator.mod), ('/', operator.truediv), ('*', operator.mul), ] _ASSIGN_OPERATORS = [(op + '=', opfunc) for op, opfunc in _OPERATORS] _ASSIGN_OPERATORS.append(('=', lambda cur, right: right)) _NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*' class JSInterpreter(object): def __init__(self, code, objects=None): if objects is None: objects = {} self.code = code self._functions = {} self._objects = objects def interpret_statement(self, stmt, local_vars, allow_recursion=100): if allow_recursion < 0: raise ExtractorError('Recursion limit reached') should_abort = False stmt = stmt.lstrip() stmt_m = re.match(r'var\s', stmt) if stmt_m: expr = stmt[len(stmt_m.group(0)):] else: return_m = re.match(r'return(?:\s+|$)', stmt) if return_m: expr = stmt[len(return_m.group(0)):] should_abort = True else: # Try interpreting it as an expression expr = stmt v = self.interpret_expression(expr, local_vars, allow_recursion) return v, should_abort def interpret_expression(self, expr, local_vars, allow_recursion): expr = expr.strip() if expr == '': # Empty expression return None if expr.startswith('('): parens_count = 0 for m in re.finditer(r'[()]', expr): if m.group(0) == '(': parens_count += 1 else: parens_count -= 1 if parens_count == 0: sub_expr = expr[1:m.start()] sub_result = self.interpret_expression( sub_expr, local_vars, allow_recursion) remaining_expr = expr[m.end():].strip() if not remaining_expr: return sub_result else: expr = json.dumps(sub_result) + remaining_expr break else: raise ExtractorError('Premature end of parens in %r' % expr) for op, opfunc in _ASSIGN_OPERATORS: m = re.match(r'''(?x) (?P<out>%s)(?:\[(?P<index>[^\]]+?)\])? 
\s*%s (?P<expr>.*)$''' % (_NAME_RE, re.escape(op)), expr) if not m: continue right_val = self.interpret_expression( m.group('expr'), local_vars, allow_recursion - 1) if m.groupdict().get('index'): lvar = local_vars[m.group('out')] idx = self.interpret_expression( m.group('index'), local_vars, allow_recursion) assert isinstance(idx, int) cur = lvar[idx] val = opfunc(cur, right_val) lvar[idx] = val return val else: cur = local_vars.get(m.group('out')) val = opfunc(cur, right_val) local_vars[m.group('out')] = val return val if expr.isdigit(): return int(expr) var_m = re.match( r'(?!if|return|true|false)(?P<name>%s)$' % _NAME_RE, expr) if var_m: return local_vars[var_m.group('name')] try: return json.loads(expr) except ValueError: pass m = re.match( r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % _NAME_RE, expr) if m: variable = m.group('var') member = m.group('member') arg_str = m.group('args') if variable in local_vars: obj = local_vars[variable] else: if variable not in self._objects: self._objects[variable] = self.extract_object(variable) obj = self._objects[variable] if arg_str is None: # Member access if member == 'length': return len(obj) return obj[member] assert expr.endswith(')') # Function call if arg_str == '': argvals = tuple() else: argvals = tuple([ self.interpret_expression(v, local_vars, allow_recursion) for v in arg_str.split(',')]) if member == 'split': assert argvals == ('',) return list(obj) if member == 'join': assert len(argvals) == 1 return argvals[0].join(obj) if member == 'reverse': assert len(argvals) == 0 obj.reverse() return obj if member == 'slice': assert len(argvals) == 1 return obj[argvals[0]:] if member == 'splice': assert isinstance(obj, list) index, howMany = argvals res = [] for i in range(index, min(index + howMany, len(obj))): res.append(obj.pop(index)) return res return obj[member](argvals) m = re.match( r'(?P<in>%s)\[(?P<idx>.+)\]$' % _NAME_RE, expr) if m: val = local_vars[m.group('in')] idx = self.interpret_expression( m.group('idx'), local_vars, allow_recursion - 1) return val[idx] for op, opfunc in _OPERATORS: m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr) if not m: continue x, abort = self.interpret_statement( m.group('x'), local_vars, allow_recursion - 1) if abort: raise ExtractorError( 'Premature left-side return of %s in %r' % (op, expr)) y, abort = self.interpret_statement( m.group('y'), local_vars, allow_recursion - 1) if abort: raise ExtractorError( 'Premature right-side return of %s in %r' % (op, expr)) return opfunc(x, y) m = re.match( r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % _NAME_RE, expr) if m: fname = m.group('func') argvals = tuple([ int(v) if v.isdigit() else local_vars[v] for v in m.group('args').split(',')]) if fname not in self._functions: self._functions[fname] = self.extract_function(fname) return self._functions[fname](argvals) raise ExtractorError('Unsupported JS expression %r' % expr) def extract_object(self, objname): obj = {} obj_m = re.search( (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) + r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' + r'\}\s*;', self.code) fields = obj_m.group('fields') # Currently, it only supports function definitions fields_m = re.finditer( r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function' r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}', fields) for f in fields_m: argnames = f.group('args').split(',') obj[f.group('key')] = self.build_function(argnames, f.group('code')) return obj def extract_function(self, funcname): func_m = re.search( r'''(?x) 
(?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s* \((?P<args>[^)]*)\)\s* \{(?P<code>[^}]+)\}''' % ( re.escape(funcname), re.escape(funcname), re.escape(funcname)), self.code) if func_m is None: raise ExtractorError('Could not find JS function %r' % funcname) argnames = func_m.group('args').split(',') return self.build_function(argnames, func_m.group('code')) def call_function(self, funcname, *args): f = self.extract_function(funcname) return f(args) def build_function(self, argnames, code): def resf(args): local_vars = dict(zip(argnames, args)) for stmt in code.split(';'): res, abort = self.interpret_statement(stmt, local_vars) if abort: break return res return resf from __future__ import unicode_literals import json import operator import re from .utils import ( ExtractorError, ) _OPERATORS = [ ('|', operator.or_), ('^', operator.xor), ('&', operator.and_), ('>>', operator.rshift), ('<<', operator.lshift), ('-', operator.sub), ('+', operator.add), ('%', operator.mod), ('/', operator.truediv), ('*', operator.mul), ] _ASSIGN_OPERATORS = [(op + '=', opfunc) for op, opfunc in _OPERATORS] _ASSIGN_OPERATORS.append(('=', lambda cur, right: right)) _NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*' class JSInterpreter(object): def __init__(self, code, objects=None): if objects is None: objects = {} self.code = code self._functions = {} self._objects = objects def interpret_statement(self, stmt, local_vars, allow_recursion=100): if allow_recursion < 0: raise ExtractorError('Recursion limit reached') should_abort = False stmt = stmt.lstrip() stmt_m = re.match(r'var\s', stmt) if stmt_m: expr = stmt[len(stmt_m.group(0)):] else: return_m = re.match(r'return(?:\s+|$)', stmt) if return_m: expr = stmt[len(return_m.group(0)):] should_abort = True else: # Try interpreting it as an expression expr = stmt v = self.interpret_expression(expr, local_vars, allow_recursion) return v, should_abort def interpret_expression(self, expr, local_vars, allow_recursion): expr = expr.strip() if expr == '': # Empty expression return None if expr.startswith('('): parens_count = 0 for m in re.finditer(r'[()]', expr): if m.group(0) == '(': parens_count += 1 else: parens_count -= 1 if parens_count == 0: sub_expr = expr[1:m.start()] sub_result = self.interpret_expression( sub_expr, local_vars, allow_recursion) remaining_expr = expr[m.end():].strip() if not remaining_expr: return sub_result else: expr = json.dumps(sub_result) + remaining_expr break else: raise ExtractorError('Premature end of parens in %r' % expr) for op, opfunc in _ASSIGN_OPERATORS: m = re.match(r'''(?x) (?P<out>%s)(?:\[(?P<index>[^\]]+?)\])? 
\s*%s (?P<expr>.*)$''' % (_NAME_RE, re.escape(op)), expr) if not m: continue right_val = self.interpret_expression( m.group('expr'), local_vars, allow_recursion - 1) if m.groupdict().get('index'): lvar = local_vars[m.group('out')] idx = self.interpret_expression( m.group('index'), local_vars, allow_recursion) assert isinstance(idx, int) cur = lvar[idx] val = opfunc(cur, right_val) lvar[idx] = val return val else: cur = local_vars.get(m.group('out')) val = opfunc(cur, right_val) local_vars[m.group('out')] = val return val if expr.isdigit(): return int(expr) var_m = re.match( r'(?!if|return|true|false)(?P<name>%s)$' % _NAME_RE, expr) if var_m: return local_vars[var_m.group('name')] try: return json.loads(expr) except ValueError: pass m = re.match( r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % _NAME_RE, expr) if m: variable = m.group('var') member = m.group('member') arg_str = m.group('args') if variable in local_vars: obj = local_vars[variable] else: if variable not in self._objects: self._objects[variable] = self.extract_object(variable) obj = self._objects[variable] if arg_str is None: # Member access if member == 'length': return len(obj) return obj[member] assert expr.endswith(')') # Function call if arg_str == '': argvals = tuple() else: argvals = tuple([ self.interpret_expression(v, local_vars, allow_recursion) for v in arg_str.split(',')]) if member == 'split': assert argvals == ('',) return list(obj) if member == 'join': assert len(argvals) == 1 return argvals[0].join(obj) if member == 'reverse': assert len(argvals) == 0 obj.reverse() return obj if member == 'slice': assert len(argvals) == 1 return obj[argvals[0]:] if member == 'splice': assert isinstance(obj, list) index, howMany = argvals res = [] for i in range(index, min(index + howMany, len(obj))): res.append(obj.pop(index)) return res return obj[member](argvals) m = re.match( r'(?P<in>%s)\[(?P<idx>.+)\]$' % _NAME_RE, expr) if m: val = local_vars[m.group('in')] idx = self.interpret_expression( m.group('idx'), local_vars, allow_recursion - 1) return val[idx] for op, opfunc in _OPERATORS: m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr) if not m: continue x, abort = self.interpret_statement( m.group('x'), local_vars, allow_recursion - 1) if abort: raise ExtractorError( 'Premature left-side return of %s in %r' % (op, expr)) y, abort = self.interpret_statement( m.group('y'), local_vars, allow_recursion - 1) if abort: raise ExtractorError( 'Premature right-side return of %s in %r' % (op, expr)) return opfunc(x, y) m = re.match( r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]*)\)$' % _NAME_RE, expr) if m: fname = m.group('func') argvals = tuple([ int(v) if v.isdigit() else local_vars[v] for v in m.group('args').split(',')]) if len(m.group('args')) > 0 else tuple() if fname not in self._functions: self._functions[fname] = self.extract_function(fname) return self._functions[fname](argvals) raise ExtractorError('Unsupported JS expression %r' % expr) def extract_object(self, objname): obj = {} obj_m = re.search( (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) + r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' + r'\}\s*;', self.code) fields = obj_m.group('fields') # Currently, it only supports function definitions fields_m = re.finditer( r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function' r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}', fields) for f in fields_m: argnames = f.group('args').split(',') obj[f.group('key')] = self.build_function(argnames, f.group('code')) return obj def extract_function(self, 
funcname): func_m = re.search( r'''(?x) (?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s* \((?P<args>[^)]*)\)\s* \{(?P<code>[^}]+)\}''' % ( re.escape(funcname), re.escape(funcname), re.escape(funcname)), self.code) if func_m is None: raise ExtractorError('Could not find JS function %r' % funcname) argnames = func_m.group('args').split(',') return self.build_function(argnames, func_m.group('code')) def call_function(self, funcname, *args): f = self.extract_function(funcname) return f(args) def build_function(self, argnames, code): def resf(args): local_vars = dict(zip(argnames, args)) for stmt in code.split(';'): res, abort = self.interpret_statement(stmt, local_vars) if abort: break return res return resf
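# Hypothetical usage of the JSInterpreter defined above, assuming the module
# is importable as youtube_dl.jsinterp: extract a named JS function from a
# code string and call it with Python arguments.
from youtube_dl.jsinterp import JSInterpreter

jsi = JSInterpreter('function f(a,b){return a+b;}')
assert jsi.call_function('f', 3, 4) == 7  # extract_function() + build_function()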
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-4-fixed/youtube-dl/youtube_dl/jsinterp.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-4-buggy/youtube-dl/youtube_dl/jsinterp.py
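# A self-contained sketch (my reading of the diff above, not a changelog) of
# how the two jsinterp.py copies differ: the function-call regex quantifier
# '+' versus '*', plus the empty-argument guard, together allow zero-argument
# calls such as f().
import re

_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
plus_rex = re.compile(r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % _NAME_RE)
star_rex = re.compile(r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]*)\)$' % _NAME_RE)

assert plus_rex.match('f()') is None      # '+' requires at least one argument character
assert star_rex.match('f()') is not None  # '*' accepts 'f()', hence the tuple() fallback
assert star_rex.match('f(a,b)').group('args') == 'a,b'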
youtube-dl-bug-38
from __future__ import unicode_literals import json import re import socket from .common import InfoExtractor from ..utils import ( compat_http_client, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_request, ExtractorError, ) class FacebookIE(InfoExtractor): """Information Extractor for Facebook""" _VALID_URL = r'''(?x) (?:https?://)?(?:\w+\.)?facebook\.com/ (?:[^#?]*\#!/)? (?:video/video\.php|photo\.php|video/embed)\?(?:.*?) (?:v|video_id)=(?P<id>[0-9]+) (?:.*)''' _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _TEST = { 'url': 'https://www.facebook.com/photo.php?v=120708114770723', 'md5': '48975a41ccc4b7a581abd68651c1a5a8', 'info_dict': { 'id': '120708114770723', 'ext': 'mp4', 'duration': 279, 'title': 'PEOPLE ARE AWESOME 2013' } } def report_login(self): """Report attempt to log in.""" self.to_screen('Logging in') def _login(self): (useremail, password) = self._get_login_info() if useremail is None: return login_page_req = compat_urllib_request.Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') self.report_login() login_page = self._download_webpage(login_page_req, None, note=False, errnote='Unable to download login page') lsd = self._search_regex( r'<input type="hidden" name="lsd" value="([^"]*)"', login_page, 'lsd') lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') login_form = { 'email': useremail, 'pass': password, 'lsd': lsd, 'lgnrnd': lgnrnd, 'next': 'http://facebook.com/home.php', 'default_persistent': '0', 'legacy_return': '1', 'timezone': '-60', 'trynum': '1', } request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = compat_urllib_request.urlopen(request).read() if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). 
Check credentials or wait.') return check_form = { 'fb_dtsg': self._search_regex(r'"fb_dtsg":"(.*?)"', login_results, 'fb_dtsg'), 'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, 'nh'), 'name_action_selected': 'dont_save', 'submit[Continue]': self._search_regex(r'<input value="(.*?)" name="submit\[Continue\]"', login_results, 'continue'), } check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, compat_urllib_parse.urlencode(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = compat_urllib_request.urlopen(check_req).read() if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self._downloader.report_warning('unable to log in: %s' % compat_str(err)) return def _real_initialize(self): self._login() def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) video_id = mobj.group('id') url = 'https://www.facebook.com/video/video.php?v=%s' % video_id webpage = self._download_webpage(url, video_id) BEFORE = '{swf.addParam(param[0], param[1]);});\n' AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});' m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage) if not m: m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) if m_msg is not None: raise ExtractorError( 'The video is not available, Facebook said: "%s"' % m_msg.group(1), expected=True) else: raise ExtractorError('Cannot parse data') data = dict(json.loads(m.group(1))) params_raw = compat_urllib_parse.unquote(data['params']) params = json.loads(params_raw) video_data = params['video_data'][0] video_url = video_data.get('hd_src') if not video_url: video_url = video_data['sd_src'] if not video_url: raise ExtractorError('Cannot find video URL') video_duration = int(video_data['video_duration']) thumbnail = video_data['thumbnail_src'] video_title = self._html_search_regex( r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title') info = { 'id': video_id, 'title': video_title, 'url': video_url, 'ext': 'mp4', 'duration': video_duration, 'thumbnail': thumbnail, } return [info] from __future__ import unicode_literals import json import re import socket from .common import InfoExtractor from ..utils import ( compat_http_client, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_request, urlencode_postdata, ExtractorError, ) class FacebookIE(InfoExtractor): """Information Extractor for Facebook""" _VALID_URL = r'''(?x) (?:https?://)?(?:\w+\.)?facebook\.com/ (?:[^#?]*\#!/)? (?:video/video\.php|photo\.php|video/embed)\?(?:.*?) 
(?:v|video_id)=(?P<id>[0-9]+) (?:.*)''' _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _TEST = { 'url': 'https://www.facebook.com/photo.php?v=120708114770723', 'md5': '48975a41ccc4b7a581abd68651c1a5a8', 'info_dict': { 'id': '120708114770723', 'ext': 'mp4', 'duration': 279, 'title': 'PEOPLE ARE AWESOME 2013' } } def report_login(self): """Report attempt to log in.""" self.to_screen('Logging in') def _login(self): (useremail, password) = self._get_login_info() if useremail is None: return login_page_req = compat_urllib_request.Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') login_page = self._download_webpage(login_page_req, None, note='Downloading login page', errnote='Unable to download login page') lsd = self._search_regex( r'<input type="hidden" name="lsd" value="([^"]*)"', login_page, 'lsd') lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') login_form = { 'email': useremail, 'pass': password, 'lsd': lsd, 'lgnrnd': lgnrnd, 'next': 'http://facebook.com/home.php', 'default_persistent': '0', 'legacy_return': '1', 'timezone': '-60', 'trynum': '1', } request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = self._download_webpage(request, None, note='Logging in', errnote='unable to fetch login page') if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). 
Check credentials or wait.') return check_form = { 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'), 'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, 'nh'), 'name_action_selected': 'dont_save', 'submit[Continue]': self._search_regex(r'<button[^>]+value="(.*?)"[^>]+name="submit\[Continue\]"', login_results, 'continue'), } check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self._downloader.report_warning('unable to log in: %s' % compat_str(err)) return def _real_initialize(self): self._login() def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) video_id = mobj.group('id') url = 'https://www.facebook.com/video/video.php?v=%s' % video_id webpage = self._download_webpage(url, video_id) BEFORE = '{swf.addParam(param[0], param[1]);});\n' AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});' m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage) if not m: m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) if m_msg is not None: raise ExtractorError( 'The video is not available, Facebook said: "%s"' % m_msg.group(1), expected=True) else: raise ExtractorError('Cannot parse data') data = dict(json.loads(m.group(1))) params_raw = compat_urllib_parse.unquote(data['params']) params = json.loads(params_raw) video_data = params['video_data'][0] video_url = video_data.get('hd_src') if not video_url: video_url = video_data['sd_src'] if not video_url: raise ExtractorError('Cannot find video URL') video_duration = int(video_data['video_duration']) thumbnail = video_data['thumbnail_src'] video_title = self._html_search_regex( r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title') info = { 'id': video_id, 'title': video_title, 'url': video_url, 'ext': 'mp4', 'duration': video_duration, 'thumbnail': thumbnail, } return [info]
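# Standalone demo (not youtube-dl code) of the Python 3 pitfall that
# separates the two copies above: compat_urllib_request.urlopen().read()
# returns bytes, and re.search() with a str pattern on bytes raises
# TypeError, whereas self._download_webpage() returns decoded text.
import re
import sys

login_results = b'<form name="login"></form>'  # the shape urlopen().read() yields
if sys.version_info >= (3, 0):
    try:
        re.search(r'name="login"', login_results)
    except TypeError:
        pass  # str pattern vs bytes haystack; decode first, as _download_webpage does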
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-38-fixed/youtube-dl/youtube_dl/extractor/facebook.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-38-buggy/youtube-dl/youtube_dl/extractor/facebook.py
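# A minimal sketch, mirroring the assumed shape of youtube-dl's
# urlencode_postdata() helper (not quoted from this entry), of why the entry
# above swaps compat_urllib_parse.urlencode() for it: urllib POST bodies must
# be bytes on Python 3, while urlencode() returns str there.
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

def urlencode_postdata(data):
    # ASCII-encoded bytes are a valid request body on both Python 2 and 3
    return urlencode(data).encode('ascii')

assert isinstance(urlencode_postdata({'email': 'a@b.c'}), bytes)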
youtube-dl-bug-36
from __future__ import unicode_literals import json import re import socket from .common import InfoExtractor from ..utils import ( compat_http_client, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_request, urlencode_postdata, ExtractorError, ) class FacebookIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:\w+\.)?facebook\.com/ (?:[^#?]*\#!/)? (?:video/video\.php|photo\.php|video/embed)\?(?:.*?) (?:v|video_id)=(?P<id>[0-9]+) (?:.*)''' _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _TEST = { 'url': 'https://www.facebook.com/photo.php?v=120708114770723', 'md5': '48975a41ccc4b7a581abd68651c1a5a8', 'info_dict': { 'id': '120708114770723', 'ext': 'mp4', 'duration': 279, 'title': 'PEOPLE ARE AWESOME 2013', } } def _login(self): (useremail, password) = self._get_login_info() if useremail is None: return login_page_req = compat_urllib_request.Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') login_page = self._download_webpage(login_page_req, None, note='Downloading login page', errnote='Unable to download login page') lsd = self._search_regex( r'<input type="hidden" name="lsd" value="([^"]*)"', login_page, 'lsd') lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') login_form = { 'email': useremail, 'pass': password, 'lsd': lsd, 'lgnrnd': lgnrnd, 'next': 'http://facebook.com/home.php', 'default_persistent': '0', 'legacy_return': '1', 'timezone': '-60', 'trynum': '1', } request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = self._download_webpage(request, None, note='Logging in', errnote='unable to fetch login page') if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). 
Check credentials or wait.') return check_form = { 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'), 'h': self._search_regex(r'name="h" value="(\w*?)"', login_results, 'h'), 'name_action_selected': 'dont_save', } check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self._downloader.report_warning('unable to log in: %s' % compat_str(err)) return def _real_initialize(self): self._login() def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') url = 'https://www.facebook.com/video/video.php?v=%s' % video_id webpage = self._download_webpage(url, video_id) BEFORE = '{swf.addParam(param[0], param[1]);});\n' AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});' m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage) if not m: m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) if m_msg is not None: raise ExtractorError( 'The video is not available, Facebook said: "%s"' % m_msg.group(1), expected=True) else: raise ExtractorError('Cannot parse data') data = dict(json.loads(m.group(1))) params_raw = compat_urllib_parse.unquote(data['params']) params = json.loads(params_raw) video_data = params['video_data'][0] video_url = video_data.get('hd_src') if not video_url: video_url = video_data['sd_src'] if not video_url: raise ExtractorError('Cannot find video URL') video_title = self._html_search_regex( r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title') return { 'id': video_id, 'title': video_title, 'url': video_url, 'duration': int(video_data['video_duration']), 'thumbnail': video_data['thumbnail_src'], } from __future__ import unicode_literals import json import re import socket from .common import InfoExtractor from ..utils import ( compat_http_client, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_request, urlencode_postdata, ExtractorError, ) class FacebookIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:\w+\.)?facebook\.com/ (?:[^#]*?\#!/)? (?:video/video\.php|photo\.php|video/embed)\?(?:.*?) 
(?:v|video_id)=(?P<id>[0-9]+) (?:.*)''' _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _TEST = { 'url': 'https://www.facebook.com/photo.php?v=120708114770723', 'md5': '48975a41ccc4b7a581abd68651c1a5a8', 'info_dict': { 'id': '120708114770723', 'ext': 'mp4', 'duration': 279, 'title': 'PEOPLE ARE AWESOME 2013', } } def _login(self): (useremail, password) = self._get_login_info() if useremail is None: return login_page_req = compat_urllib_request.Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') login_page = self._download_webpage(login_page_req, None, note='Downloading login page', errnote='Unable to download login page') lsd = self._search_regex( r'<input type="hidden" name="lsd" value="([^"]*)"', login_page, 'lsd') lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') login_form = { 'email': useremail, 'pass': password, 'lsd': lsd, 'lgnrnd': lgnrnd, 'next': 'http://facebook.com/home.php', 'default_persistent': '0', 'legacy_return': '1', 'timezone': '-60', 'trynum': '1', } request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = self._download_webpage(request, None, note='Logging in', errnote='unable to fetch login page') if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.') return check_form = { 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'), 'h': self._search_regex(r'name="h" value="(\w*?)"', login_results, 'h'), 'name_action_selected': 'dont_save', } check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self._downloader.report_warning('unable to log in: %s' % compat_str(err)) return def _real_initialize(self): self._login() def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') url = 'https://www.facebook.com/video/video.php?v=%s' % video_id webpage = self._download_webpage(url, video_id) BEFORE = '{swf.addParam(param[0], param[1]);});\n' AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});' m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage) if not m: m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) if m_msg is not None: raise ExtractorError( 'The video is not available, Facebook said: "%s"' % m_msg.group(1), expected=True) else: raise ExtractorError('Cannot parse data') data = dict(json.loads(m.group(1))) params_raw = compat_urllib_parse.unquote(data['params']) params = json.loads(params_raw) video_data = params['video_data'][0] video_url = video_data.get('hd_src') if not video_url: 
video_url = video_data['sd_src'] if not video_url: raise ExtractorError('Cannot find video URL') video_title = self._html_search_regex( r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title') return { 'id': video_id, 'title': video_title, 'url': video_url, 'duration': int(video_data['video_duration']), 'thumbnail': video_data['thumbnail_src'], }
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-36-fixed/youtube-dl/youtube_dl/extractor/facebook.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-36-buggy/youtube-dl/youtube_dl/extractor/facebook.py
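# The key diff in the entry above appears to be _VALID_URL: '(?:[^#?]*\#!/)?'
# forbids a query string before the '#!/' fragment, while '(?:[^#]*?\#!/)?'
# allows one. Demonstration on a made-up URL (OLD/NEW are labels of mine, not
# names from the source):
import re

OLD = r'''(?x)https?://(?:\w+\.)?facebook\.com/
          (?:[^#?]*\#!/)?
          (?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
          (?:v|video_id)=(?P<id>[0-9]+)(?:.*)'''
NEW = r'''(?x)https?://(?:\w+\.)?facebook\.com/
          (?:[^#]*?\#!/)?
          (?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
          (?:v|video_id)=(?P<id>[0-9]+)(?:.*)'''

url = 'https://www.facebook.com/video.php?v=1#!/video/video.php?v=120708114770723'
assert re.match(OLD, url) is None
assert re.match(NEW, url).group('id') == '120708114770723'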
youtube-dl-bug-30
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import collections import contextlib import datetime import errno import fileinput import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import sys import time import tokenize import traceback if os.name == 'nt': import ctypes from .compat import ( compat_basestring, compat_cookiejar, compat_expanduser, compat_get_terminal_size, compat_http_client, compat_kwargs, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, ) from .utils import ( escape_url, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, DownloadError, encodeFilename, ExtractorError, format_bytes, formatSeconds, HEADRequest, locked_file, make_HTTPS_handler, MaxDownloadsReached, PagedList, parse_filesize, PerRequestProxyHandler, PostProcessingError, platform_name, preferredencoding, render_table, SameFileError, sanitize_filename, sanitize_path, std_headers, subtitles_filename, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLHandler, prepend_extension, replace_extension, args_to_str, age_restricted, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractors from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. As, given a video URL, the downloader doesn't know how to extract all the needed information, task that InfoExtractors do, it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object handles it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL process the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. usenetrc: Use netrc for authentication instead. verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. 
dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatic subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use cn_verification_proxy: URL of the proxy to use for IP address verification on Chinese sites. (Experimental) socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fridibi debug_printtraffic:Print out sent and received HTTP traffic include_ads: Download ads as well default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Do not resolve URLs, return the immediate result. Pass in 'in_playlist' to only show this behavior for playlist items. 
postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See youtube_dl/postprocessor/__init__.py for a list. as well as any further keyword arguments for the postprocessor. progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. If status is one of "downloading", or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. * eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. merge_output_format: Extension to use when merging formats. fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: (Experimental) Client-side IP address to bind to. call_home: Boolean, true iff we are allowed to contact the youtube-dl servers for debugging. sleep_interval: Number of seconds to sleep before each download. listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called with the info_dict of every video. If it returns a message, the video is ignored. If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv. The following parameters are not used by YoutubeDL itself, they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args. The following options are used by the post processors: prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, otherwise prefer avconv. 
""" params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = params self.cache = Cache(self) if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == 2: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.version_info >= (3,) and sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # On Python 3, the Unicode filesystem API will throw errors (#1474) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractors(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 autonumber_templ = '%0' + str(autonumber_size) + 'd' template_dict['autonumber'] = autonumber_templ % self._num_downloads if template_dict.get('playlist_index') is not None: template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index']) if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '?x%d' % template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id')) template_dict = dict((k, sanitize(k, v)) for k, v in template_dict.items() if v is not None) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL)) tmpl = compat_expanduser(outtmpl) filename = tmpl % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = 
encodeFilename(filename, True).decode(preferredencoding()) return filename except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date', None) if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count', None) if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except ExtractorError as de: # An error we somewhat expected self.report_error(compat_str(de), de.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) assert new_result.get('_type') != 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type == 'playlist' or result_type == 'multi_video': # We process each entry in the playlist playlist = ie_result.get('title', None) or ie_result.get('id', None) self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend', None) # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items', None) playlistitems = None if playlistitems_str is not None: def 
iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results return ie_result elif result_type == 'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) 
$ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? \s*(?P<value>[a-zA-Z0-9_-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': selectors.append(current_selector) current_selector = None elif string == '/': first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(formats): 
for f in fs: for format in f(formats): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(formats): for f in fs: picked_formats = list(f(formats)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(formats): if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format elif (all(f.get('acodec') != 'none' for f in formats) or all(f.get('vcodec') != 'none' for f in formats)): yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(formats): formats = list(formats) for pair in itertools.product(video_selector(formats), audio_selector(formats)): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(formats): for _filter in filters: formats = list(filter(_filter, formats)) return selector_function(formats) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = list(compat_tokenize_tokenize(stream.readline)) except 
tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies return res def _calc_cookies(self, info_dict): pr = compat_urllib_request.Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference'), t.get('width'), t.get('height'), t.get('id'), t.get('url'))) for i, t in enumerate(thumbnails): if 'width' in t and 'height' in t: t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if thumbnails and 'thumbnail' not in info_dict: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. 
negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], info_dict.get('subtitles'), info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): if 'url' not in format: raise ExtractorError('Missing "url" key in result (index %d)' % i) if format.get('format_id') is None: format['format_id'] = compat_str(i) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if 'ext' not in format: format['ext'] = determine_ext(format['url']).lower() # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' field if the original info_dict lists them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # which can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and info_dict['extractor'] in ['youtube', 'ted']): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) formats_to_download = list(format_selector(formats)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best
quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
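        # The truncation above keeps filenames derived from the title within
        # common filesystem name-length limits.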
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None: sub_data = sub_info['data'] else: try: sub_data = ie._download_webpage( sub_info['url'], info_dict['id'], note=False) except ExtractorError as err: self.report_warning('Unable to download subtitle for "%s": %s' % (sub_lang, compat_str(err.cause))) continue try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format)) else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: subfile.write(sub_data) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return if self.params.get('writeinfojson', False): infofn = replace_extension(filename, 'info.json', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): self.to_screen('[info] Video description metadata is already present') else: self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(self.filter_requested_info(info_dict), infofn) except (OSError, IOError): self.report_error('Cannot write metadata to JSON file ' + infofn) return self._write_thumbnails(info_dict, filename) if not self.params.get('skip_download', False): try: def dl(name, info): fd = get_suitable_downloader(info, self.params)(self, self.params) for ph in self._progress_hooks: fd.add_progress_hook(ph) if self.params.get('verbose'): self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) return fd.download(name, info) if info_dict.get('requested_formats') is not None: downloaded = [] success = True merger = FFmpegMergerPP(self) if not merger.available: postprocessors = [] self.report_warning('You have requested multiple ' 'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.') else: postprocessors = [merger] def compatible_formats(formats): video, audio = formats # Check extension video_ext, audio_ext = video.get('ext'), audio.get('ext') if video_ext and audio_ext: COMPATIBLE_EXTS = ( ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v'), ('webm',) ) for exts in COMPATIBLE_EXTS: if video_ext in exts and audio_ext in exts: return True # TODO: Check acodec/vcodec return False filename_real_ext = os.path.splitext(filename)[1][1:] filename_wo_ext = ( os.path.splitext(filename)[0] if filename_real_ext == info_dict['ext'] else filename) requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') # Ensure filename always has a correct extension for successful merge filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' 'merged' % filename) else: for f in requested_formats: new_info = dict(info_dict) new_info.update(f) fname = self.prepare_filename(new_info) fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext']) downloaded.append(fname) partial_success = dl(fname, new_info) success = success and partial_success info_dict['__postprocessors'] = postprocessors info_dict['__files_to_merge'] = downloaded else: # Just a single file success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_error('unable to download video data: %s' % str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success: # Fixup content fixup_policy = self.params.get('fixup') if fixup_policy is None: fixup_policy = 'detect_or_warn' stretched_ratio = info_dict.get('stretched_ratio') if stretched_ratio is not None and stretched_ratio != 1: if fixup_policy == 'warn': self.report_warning('%s: Non-uniform pixel ratio (%s)' % ( info_dict['id'], stretched_ratio)) elif fixup_policy == 'detect_or_warn': stretched_pp = FFmpegFixupStretchedPP(self) if stretched_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(stretched_pp) else: self.report_warning( '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % ( info_dict['id'], stretched_ratio)) else: assert fixup_policy in ('ignore', 'never') if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash': if fixup_policy == 'warn': self.report_warning('%s: writing DASH m4a. Only some players support this container.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM4aPP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.'
% ( info_dict['id'])) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, 
default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '?x%d' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: res += ', %sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: tn_url = info_dict.get('thumbnail') if tn_url: thumbnails = [{'id': '0', 'url': tn_url}] else: self.to_screen( '[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( ['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one req_is_string = isinstance(req, 
compat_basestring) url = req if req_is_string else req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: if req_is_string: req = url_escaped else: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = compat_urllib_request.HTTPCookieProcessor( self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' 
% (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], compat_str(err)))
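# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): exercising the format
# selection mini-language implemented by build_format_selector() above on a
# hand-written formats list. The format dicts and the spec string are
# hypothetical; auto_init=False skips loading the extractors, which are not
# needed here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _demo_formats = [
        {'format_id': '140', 'ext': 'm4a', 'vcodec': 'none', 'acodec': 'mp4a.40.2', 'abr': 128},
        {'format_id': '247', 'ext': 'webm', 'vcodec': 'vp9', 'acodec': 'none', 'height': 720},
        {'format_id': '22', 'ext': 'mp4', 'vcodec': 'avc1.64001F', 'acodec': 'mp4a.40.2', 'height': 720},
    ]
    _ydl = YoutubeDL({'quiet': True}, auto_init=False)
    _select = _ydl.build_format_selector('bestvideo[height<=720]+bestaudio/best')
    for _choice in _select(_demo_formats):
        # With the toy data above this prints '247+140': the best video-only
        # format merged with the best audio-only format.
        print(_choice['format_id'])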
verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatic subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use cn_verification_proxy: URL of the proxy to use for IP address verification on Chinese sites. 
                       (Experimental)
    socket_timeout:    Time to wait for unresponsive hosts, in seconds.
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi.
    debug_printtraffic: Print out sent and received HTTP traffic.
    include_ads:       Download ads as well.
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing.
    encoding:          Use this encoding instead of the system-specified one.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               youtube_dl/postprocessor/__init__.py for a list.
                       as well as any further keyword arguments for the
                       postprocessor.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    (Experimental) Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       youtube-dl servers for debugging.
    sleep_interval:    Number of seconds to sleep before each download.
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video. If it returns a message, the video is
                       ignored. If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.

    The following options determine which downloader is picked:
    external_downloader: Executable of the external downloader to call.
                       None or unset for standard (built-in) downloader.
    hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv.

    The following parameters are not used by YoutubeDL itself, they are used
    by the downloader (see youtube_dl/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize,
    test, noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize, external_downloader_args.

    The following options are used by the post processors:
    prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are
                       available, otherwise prefer avconv.
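
    A short illustrative example (an editorial addition; the option values
    are hypothetical, and only options documented above are used) of such an
    options dictionary, including a match_filter callback that follows the
    contract described above -- return None to download, or a message string
    to skip:

        from youtube_dl import YoutubeDL

        def only_short(info_dict):
            # None means "download"; a string means "skip, with this reason".
            duration = info_dict.get('duration')
            if duration is not None and duration > 3600:
                return 'Skipping %s: longer than an hour' % info_dict.get('id')
            return None

        ydl_opts = {
            'quiet': True,
            'format': 'best',
            'outtmpl': '%(title)s-%(id)s.%(ext)s',
            'writesubtitles': True,
            'subtitleslangs': ['en'],
            'match_filter': only_short,
        }
        ydl = YoutubeDL(ydl_opts)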
""" params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = params self.cache = Cache(self) if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == 2: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.version_info >= (3,) and sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # On Python 3, the Unicode filesystem API will throw errors (#1474) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractors(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 autonumber_templ = '%0' + str(autonumber_size) + 'd' template_dict['autonumber'] = autonumber_templ % self._num_downloads if template_dict.get('playlist_index') is not None: template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index']) if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '?x%d' % template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id')) template_dict = dict((k, sanitize(k, v)) for k, v in template_dict.items() if v is not None) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL)) tmpl = compat_expanduser(outtmpl) filename = tmpl % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = 
encodeFilename(filename, True).decode(preferredencoding()) return filename except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date', None) if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count', None) if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
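
        Editor's illustration (the URL is a placeholder): with download=False
        this only runs extraction and returns the info dictionary, leaving
        any file transfer to the caller:

            info = ydl.extract_info(
                'https://www.youtube.com/watch?v=XXXXXXXXXXX', download=False)
            print(info['title'], info.get('duration'))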
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except ExtractorError as de: # An error we somewhat expected self.report_error(compat_str(de), de.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) assert new_result.get('_type') != 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type == 'playlist' or result_type == 'multi_video': # We process each entry in the playlist playlist = ie_result.get('title', None) or ie_result.get('id', None) self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend', None) # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items', None) playlistitems = None if playlistitems_str is not None: def 
iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results return ie_result elif result_type == 'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) 
$ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? \s*(?P<value>[a-zA-Z0-9_-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': selectors.append(current_selector) current_selector = None elif string == '/': first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(formats): 
for f in fs: for format in f(formats): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(formats): for f in fs: picked_formats = list(f(formats)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(formats): formats = list(formats) if not formats: return if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format elif (all(f.get('acodec') != 'none' for f in formats) or all(f.get('vcodec') != 'none' for f in formats)): yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(formats): formats = list(formats) for pair in itertools.product(video_selector(formats), audio_selector(formats)): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(formats): for _filter in filters: formats = list(filter(_filter, formats)) return selector_function(formats) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = 
list(compat_tokenize_tokenize(stream.readline)) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies return res def _calc_cookies(self, info_dict): pr = compat_urllib_request.Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference'), t.get('width'), t.get('height'), t.get('id'), t.get('url'))) for i, t in enumerate(thumbnails): if 'width' in t and 'height' in t: t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if thumbnails and 'thumbnail' not in info_dict: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. 
negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], info_dict.get('subtitles'), info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): if 'url' not in format: raise ExtractorError('Missing "url" key in result (index %d)' % i) if format.get('format_id') is None: format['format_id'] = compat_str(i) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if 'ext' not in format: format['ext'] = determine_ext(format['url']).lower() # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' fields if the original info_dict list them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # wich can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and info_dict['extractor'] in ['youtube', 'ted']): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) formats_to_download = list(format_selector(formats)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best 
quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
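# --- Editor's note: the following is a standalone sketch (hypothetical
# function name, not part of the original file) of the preference-list
# resolution that process_subtitles() performs above: a 'subtitlesformat'
# query such as 'srt/ass/best' is split on '/', each candidate extension is
# tried in order against the available formats (ordered worst -> best), and
# the best available format is the fallback when nothing matches.

def pick_subtitle_format(formats, formats_query='best'):
    for ext in formats_query.split('/'):
        if ext == 'best':
            return formats[-1]
        matches = [f for f in formats if f['ext'] == ext]
        if matches:
            return matches[-1]
    return formats[-1]  # nothing matched: fall back to the best available

# e.g. pick_subtitle_format([{'ext': 'vtt'}, {'ext': 'srt'}], 'ass/srt')
# picks the srt entry, since 'ass' is unavailable and 'srt' is tried next.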
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None: sub_data = sub_info['data'] else: try: sub_data = ie._download_webpage( sub_info['url'], info_dict['id'], note=False) except ExtractorError as err: self.report_warning('Unable to download subtitle for "%s": %s' % (sub_lang, compat_str(err.cause))) continue try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format)) else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: subfile.write(sub_data) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return if self.params.get('writeinfojson', False): infofn = replace_extension(filename, 'info.json', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): self.to_screen('[info] Video description metadata is already present') else: self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(self.filter_requested_info(info_dict), infofn) except (OSError, IOError): self.report_error('Cannot write metadata to JSON file ' + infofn) return self._write_thumbnails(info_dict, filename) if not self.params.get('skip_download', False): try: def dl(name, info): fd = get_suitable_downloader(info, self.params)(self, self.params) for ph in self._progress_hooks: fd.add_progress_hook(ph) if self.params.get('verbose'): self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) return fd.download(name, info) if info_dict.get('requested_formats') is not None: downloaded = [] success = True merger = FFmpegMergerPP(self) if not merger.available: postprocessors = [] self.report_warning('You have requested multiple ' 'formats but ffmpeg or avconv are not installed.' 
' The formats won\'t be merged.') else: postprocessors = [merger] def compatible_formats(formats): video, audio = formats # Check extension video_ext, audio_ext = audio.get('ext'), video.get('ext') if video_ext and audio_ext: COMPATIBLE_EXTS = ( ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v'), ('webm') ) for exts in COMPATIBLE_EXTS: if video_ext in exts and audio_ext in exts: return True # TODO: Check acodec/vcodec return False filename_real_ext = os.path.splitext(filename)[1][1:] filename_wo_ext = ( os.path.splitext(filename)[0] if filename_real_ext == info_dict['ext'] else filename) requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') # Ensure filename always has a correct extension for successful merge filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' 'merged' % filename) else: for f in requested_formats: new_info = dict(info_dict) new_info.update(f) fname = self.prepare_filename(new_info) fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext']) downloaded.append(fname) partial_success = dl(fname, new_info) success = success and partial_success info_dict['__postprocessors'] = postprocessors info_dict['__files_to_merge'] = downloaded else: # Just a single file success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_error('unable to download video data: %s' % str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success: # Fixup content fixup_policy = self.params.get('fixup') if fixup_policy is None: fixup_policy = 'detect_or_warn' stretched_ratio = info_dict.get('stretched_ratio') if stretched_ratio is not None and stretched_ratio != 1: if fixup_policy == 'warn': self.report_warning('%s: Non-uniform pixel ratio (%s)' % ( info_dict['id'], stretched_ratio)) elif fixup_policy == 'detect_or_warn': stretched_pp = FFmpegFixupStretchedPP(self) if stretched_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(stretched_pp) else: self.report_warning( '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % ( info_dict['id'], stretched_ratio)) else: assert fixup_policy in ('ignore', 'never') if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash': if fixup_policy == 'warn': self.report_warning('%s: writing DASH m4a. Only some players support this container.' % ( info_dict['id'])) elif fixup_policy == 'detect_or_warn': fixup_pp = FFmpegFixupM4aPP(self) if fixup_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(fixup_pp) else: self.report_warning( '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' 
% ( info_dict['id'])) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, 
default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '?x%d' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: res += ', %sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: tn_url = info_dict.get('thumbnail') if tn_url: thumbnails = [{'id': '0', 'url': tn_url}] else: self.to_screen( '[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( ['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one req_is_string = isinstance(req, 
compat_basestring) url = req if req_is_string else req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: if req_is_string: req = url_escaped else: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = compat_urllib_request.HTTPCookieProcessor( self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], compat_str(err)))
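# --- Editor's note: a minimal end-to-end usage example (an editorial
# addition, not part of this file) tying the pieces above together: options
# go in as a plain dict, the context manager saves/restores the console
# title and persists cookies on exit, and download() drives extraction and
# the actual file transfer. The URL is a placeholder.

if __name__ == '__main__':
    from youtube_dl import YoutubeDL

    ydl_opts = {
        'format': 'bestvideo+bestaudio/best',   # parsed by build_format_selector()
        'outtmpl': '%(title)s-%(id)s.%(ext)s',  # expanded by prepare_filename()
        'ignoreerrors': True,                   # see trouble() above
    }
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://www.youtube.com/watch?v=XXXXXXXXXXX'])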
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-30-fixed/youtube-dl/youtube_dl/YoutubeDL.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-30-buggy/youtube-dl/youtube_dl/YoutubeDL.py
youtube-dl-bug-8
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback

if os.name == 'nt':
    import ctypes

from .compat import (
    compat_basestring,
    compat_cookiejar,
    compat_expanduser,
    compat_get_terminal_size,
    compat_http_client,
    compat_kwargs,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
)
from .utils import (
    escape_url,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    determine_ext,
    DownloadError,
    encodeFilename,
    ExtractorError,
    format_bytes,
    formatSeconds,
    HEADRequest,
    locked_file,
    make_HTTPS_handler,
    MaxDownloadsReached,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    PostProcessingError,
    platform_name,
    preferredencoding,
    render_table,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    std_headers,
    subtitles_filename,
    UnavailableVideoError,
    url_basename,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLHandler,
    prepend_extension,
    replace_extension,
    args_to_str,
    age_restricted,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    get_postprocessor,
)
from .version import __version__


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader doesn't know how to
    extract all the needed information (a task the InfoExtractors
    handle), so it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor
    extracts all the information about the video or videos the URL
    refers to, and YoutubeDL processes the extracted information,
    possibly using a File Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to
    saturate the object constructor with arguments, it receives a
    dictionary of options instead. These options are available through
    the params attribute for the InfoExtractors to use. The YoutubeDL
    also registers itself as the downloader in charge for the
    InfoExtractors that are added to it, so this is a "mutual
    registration". A usage sketch follows the option list below.

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forceid:           Force printing ID.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    forceduration:     Force printing duration.
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    simulate:          Do not download the video files.
    format:            Video format code. See options.py for more information.
    outtmpl:           Template for output names.
    restrictfilenames: Do not allow "&" and spaces in file names
    ignoreerrors:      Do not stop on download errors.
    force_generic_extractor: Force downloader to use the generic extractor
    nooverwrites:      Prevent overwriting files.
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video description to a .info.json file
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    write_all_thumbnails:  Write all thumbnail formats to files
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatic subtitles to a file
    allsubtitles:      Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date
                       is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the
                       video must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    cookiefile:        File name where cookies should be read from and
                       dumped to.
    nocheckcertificate:Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    cn_verification_proxy:  URL of the proxy to use for IP address
                       verification on Chinese sites. (Experimental)
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               youtube_dl/postprocessor/__init__.py for a list.
                       as well as any further keyword arguments for the
                       postprocessor.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    (Experimental) Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       youtube-dl servers for debugging.
    sleep_interval:    Number of seconds to sleep before each download.
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.

    The following options determine which downloader is picked:
    external_downloader: Executable of the external downloader to call.
                       None or unset for standard (built-in) downloader.
    hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv.

    The following parameters are not used by YoutubeDL itself, they are used
    by the downloader (see youtube_dl/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize,
    test, noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize, external_downloader_args.

    The following options are used by the post processors:
    prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are
                       available, otherwise prefer avconv.
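
    Usage sketch (editor's addition; a hedged example wiring the options
    above -- the postprocessor key and hook fields mirror documented entries,
    but treat the concrete values as illustrative):

        def progress_hook(d):
            # 'status' is the only key guaranteed to be present
            if d['status'] == 'finished':
                print('downloaded %s' % d['filename'])

        ydl_opts = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',  # see youtube_dl/postprocessor/__init__.py
                'preferredcodec': 'mp3',
            }],
            'progress_hooks': [progress_hook],
        }
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['<video URL>'])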
""" params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = params self.cache = Cache(self) if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == 2: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.version_info >= (3,) and sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # On Python 3, the Unicode filesystem API will throw errors (#1474) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractors(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. ''' if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 autonumber_templ = '%0' + str(autonumber_size) + 'd' template_dict['autonumber'] = autonumber_templ % self._num_downloads if template_dict.get('playlist_index') is not None: template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index']) if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '?x%d' % template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id')) template_dict = dict((k, sanitize(k, v)) for k, v in template_dict.items() if v is not None) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL)) tmpl = compat_expanduser(outtmpl) filename = tmpl % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = 
encodeFilename(filename, True).decode(preferredencoding()) return filename except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date', None) if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count', None) if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
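        For example (editor's note, illustrative):
            info = ydl.extract_info(url, download=False)  # probe metadata only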
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except ExtractorError as de: # An error we somewhat expected self.report_error(compat_str(de), de.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) assert new_result.get('_type') != 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type == 'playlist' or result_type == 'multi_video': # We process each entry in the playlist playlist = ie_result.get('title', None) or ie_result.get('id', None) self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend', None) # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items', None) playlistitems = None if playlistitems_str is not None: def 
iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = iter_playlistitems(playlistitems_str) ie_entries = ie_result['entries'] if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = [ ie_entries[i - 1] for i in playlistitems if -n_all_entries <= i - 1 < n_all_entries] else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) else: # iterable if playlistitems: entry_list = list(ie_entries) entries = [entry_list[i - 1] for i in playlistitems] else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) self.to_screen( "[%s] playlist %s: Downloading %d videos" % (ie_result['extractor'], playlist, n_entries)) if self.params.get('playlistreverse', False): entries = entries[::-1] for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results return ie_result elif result_type == 'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) 
$ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? \s*(?P<value>[a-zA-Z0-9_-]+) \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) return _filter def build_format_selector(self, format_spec): def syntax_error(note, start): message = ( 'Invalid format specification: ' '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' MERGE = 'MERGE' SINGLE = 'SINGLE' GROUP = 'GROUP' FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) def _parse_filter(tokens): filter_parts = [] for type, string, start, _, _ in tokens: if type == tokenize.OP and string == ']': return ''.join(filter_parts) else: filter_parts.append(string) def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': selectors.append(current_selector) current_selector = None elif string == '/': first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = None selectors.append(FormatSelector(PICKFIRST, (first_choice, second_choice), [])) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def 
selector_function(formats): for f in fs: for format in f(formats): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(formats): for f in fs: picked_formats = list(f(formats)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(formats): if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format elif (all(f.get('acodec') != 'none' for f in formats) or all(f.get('vcodec') != 'none' for f in formats)): yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 'stretched_ratio': formats_info[0].get('stretched_ratio'), 'acodec': formats_info[1].get('acodec'), 'abr': formats_info[1].get('abr'), 'ext': output_ext, } video_selector, audio_selector = map(_build_selector_function, selector.selector) def selector_function(formats): formats = list(formats) for pair in itertools.product(video_selector(formats), audio_selector(formats)): yield _merge(pair) filters = [self._build_format_filter(f) for f in selector.filters] def final_selector(formats): for _filter in filters: formats = list(filter(_filter, formats)) return selector_function(formats) return final_selector stream = io.BytesIO(format_spec.encode('utf-8')) try: tokens = 
list(compat_tokenize_tokenize(stream.readline)) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) class TokenIterator(object): def __init__(self, tokens): self.tokens = tokens self.counter = 0 def __iter__(self): return self def __next__(self): if self.counter >= len(self.tokens): raise StopIteration() value = self.tokens[self.counter] self.counter += 1 return value next = __next__ def restore_last_token(self): self.counter -= 1 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() add_headers = info_dict.get('http_headers') if add_headers: res.update(add_headers) cookies = self._calc_cookies(info_dict) if cookies: res['Cookie'] = cookies return res def _calc_cookies(self, info_dict): pr = compat_urllib_request.Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference'), t.get('width'), t.get('height'), t.get('id'), t.get('url'))) for i, t in enumerate(thumbnails): if 'width' in t and 'height' in t: t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if thumbnails and 'thumbnail' not in info_dict: info_dict['thumbnail'] = thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. 
negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], info_dict.get('subtitles'), info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): if 'url' not in format: raise ExtractorError('Missing "url" key in result (index %d)' % i) if format.get('format_id') is None: format['format_id'] = compat_str(i) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if 'ext' not in format: format['ext'] = determine_ext(format['url']).lower() # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' fields if the original info_dict list them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # wich can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return req_format = self.params.get('format') if req_format is None: req_format_list = [] if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and info_dict['extractor'] in ['youtube', 'ted']): merger = FFmpegMergerPP(self) if merger.available and merger.can_merge(): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) format_selector = self.build_format_selector(req_format) formats_to_download = list(format_selector(formats)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best 
quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % (lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' 
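        # Editor's note (illustrative, not original code): 'fulltitle' above
        # preserves the untruncated title for consumers of the info dict; a
        # 250-character title is cut to title[:197] + '...' (200 characters)
        # so it stays usable in output filenames.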
if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return try: dn = os.path.dirname(sanitize_path(encodeFilename(filename))) if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: self.report_error('unable to create directory ' + compat_str(err)) return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] if 
sub_info.get('data') is not None:
                    sub_data = sub_info['data']
                else:
                    try:
                        sub_data = ie._download_webpage(
                            sub_info['url'], info_dict['id'], note=False)
                    except ExtractorError as err:
                        self.report_warning('Unable to download subtitle for "%s": %s' %
                                            (sub_lang, compat_str(err.cause)))
                        continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                            subfile.write(sub_data)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return

        if self.params.get('writeinfojson', False):
            infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(self.filter_requested_info(info_dict), infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return

        self._write_thumbnails(info_dict, filename)

        if not self.params.get('skip_download', False):
            try:
                def dl(name, info):
                    fd = get_suitable_downloader(info, self.params)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)

                if info_dict.get('requested_formats') is not None:
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self)
                    if not merger.available:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                                            'formats but ffmpeg or avconv are not installed.'
                                            ' The formats won\'t be merged.')
                    else:
                        postprocessors = [merger]

                    def compatible_formats(formats):
                        video, audio = formats
                        # Check extension
                        # (fixed: the original assigned the extensions swapped)
                        video_ext, audio_ext = video.get('ext'), audio.get('ext')
                        if video_ext and audio_ext:
                            COMPATIBLE_EXTS = (
                                ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v'),
                                ('webm', ),  # fixed: ('webm') was a plain string, not a tuple
                            )
                            for exts in COMPATIBLE_EXTS:
                                if video_ext in exts and audio_ext in exts:
                                    return True
                        # TODO: Check acodec/vcodec
                        return False

                    filename_real_ext = os.path.splitext(filename)[1][1:]
                    filename_wo_ext = (
                        os.path.splitext(filename)[0]
                        if filename_real_ext == info_dict['ext']
                        else filename)
                    requested_formats = info_dict['requested_formats']
                    if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                        info_dict['ext'] = 'mkv'
                        self.report_warning(
                            'Requested formats are incompatible for merge and will be merged into mkv.')
                    # Ensure filename always has a correct extension for successful merge
                    filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
                    if os.path.exists(encodeFilename(filename)):
                        self.to_screen(
                            '[download] %s has already been downloaded and '
                            'merged' % filename)
                    else:
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            new_info.update(f)
                            fname = self.prepare_filename(new_info)
                            fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext'])
                            downloaded.append(fname)
                            partial_success = dl(fname, new_info)
                            success = success and partial_success
                        info_dict['__postprocessors'] = postprocessors
                        info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

            if success:
                # Fixup content
                fixup_policy = self.params.get('fixup')
                if fixup_policy is None:
                    fixup_policy = 'detect_or_warn'

                stretched_ratio = info_dict.get('stretched_ratio')
                if stretched_ratio is not None and stretched_ratio != 1:
                    if fixup_policy == 'warn':
                        self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                            info_dict['id'], stretched_ratio))
                    elif fixup_policy == 'detect_or_warn':
                        stretched_pp = FFmpegFixupStretchedPP(self)
                        if stretched_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(stretched_pp)
                        else:
                            self.report_warning(
                                '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
                                    info_dict['id'], stretched_ratio))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
                    if fixup_policy == 'warn':
                        self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
                            info_dict['id']))
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM4aPP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.'
% ( info_dict['id'])) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, 
default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '?x%d' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: res += ', %sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: tn_url = info_dict.get('thumbnail') if tn_url: thumbnails = [{'id': '0', 'url': tn_url}] else: self.to_screen( '[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( ['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one req_is_string = isinstance(req, 

    def urlopen(self, req):
        """ Start an HTTP download """

        # According to RFC 3986, URLs cannot contain non-ASCII characters; however,
        # this is not always respected by websites: some tend to give out URLs with
        # non-percent-encoded non-ASCII characters (see telemb.py, ard.py [#3412]).
        # urllib chokes on URLs with non-ASCII characters (see
        # http://bugs.python.org/issue3991), so to work around that issue we replace
        # the request's original URL with a percent-encoded one.
        req_is_string = isinstance(req, compat_basestring)
        url = req if req_is_string else req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            if req_is_string:
                req = url_escaped
            else:
                req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
                req = req_type(
                    url_escaped, data=req.data, headers=req.headers,
                    origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)

        return self._opener.open(req, timeout=self._socket_timeout)

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        if type('') is not compat_str:
            # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
            self.report_warning(
                'Your Python is broken! Update to a newer and supported version')

        stdout_encoding = getattr(
            sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
        encoding_str = (
            '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                locale.getpreferredencoding(),
                sys.getfilesystemencoding(),
                stdout_encoding,
                self.get_encoding()))
        write_string(encoding_str, encoding=None)

        self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
        try:
            sp = subprocess.Popen(
                ['git', 'rev-parse', '--short', 'HEAD'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                cwd=os.path.dirname(os.path.abspath(__file__)))
            out, err = sp.communicate()
            out = out.decode().strip()
            if re.match('[0-9a-f]+', out):
                self._write_string('[debug] Git HEAD: ' + out + '\n')
        except Exception:
            try:
                sys.exc_clear()
            except Exception:
                pass
        self._write_string('[debug] Python version %s - %s\n' % (
            platform.python_version(), platform_name()))

        exe_versions = FFmpegPostProcessor.get_versions(self)
        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_str = ', '.join(
            '%s %s' % (exe, v)
            for exe, v in sorted(exe_versions.items())
            if v
        )
        if not exe_str:
            exe_str = 'none'
        self._write_string('[debug] exe versions: %s\n' % exe_str)

        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

        if self.params.get('call_home', False):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
            self._write_string('[debug] Public IP address: %s\n' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode('utf-8')
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)
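
    # A configuration sketch (hypothetical values): constructing
    # YoutubeDL({'proxy': 'http://127.0.0.1:3128', 'cookiefile': 'cookies.txt',
    #            'socket_timeout': 20}) makes _setup_opener() below route all
    # requests through that proxy, keep cookies in a Mozilla-format cookie jar
    # and use a 20-second socket timeout for every request.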

    def _setup_opener(self):
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        if opts_cookiefile is None:
            self.cookiejar = compat_cookiejar.CookieJar()
        else:
            self.cookiejar = compat_cookiejar.MozillaCookieJar(
                opts_cookiefile)
            if os.access(opts_cookiefile, os.R_OK):
                self.cookiejar.load()

        cookie_processor = compat_urllib_request.HTTPCookieProcessor(
            self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            proxies = compat_urllib_request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        opener = compat_urllib_request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding
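
    # Thumbnail filenames below are derived from the video filename: for a
    # single thumbnail, 'video.mp4' becomes 'video.jpg' (extension guessed by
    # determine_ext(), falling back to jpg); with write_all_thumbnails each
    # file additionally gets an '_<thumbnail id>' suffix, e.g. the
    # hypothetical 'video_0.jpg'.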

    def _write_thumbnails(self, info_dict, filename):
        if self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails')
            if thumbnails:
                thumbnails = [thumbnails[-1]]
        elif self.params.get('write_all_thumbnails', False):
            thumbnails = info_dict.get('thumbnails')
        else:
            return

        if not thumbnails:
            # No thumbnails present, so return immediately
            return

        for t in thumbnails:
            thumb_ext = determine_ext(t['url'], 'jpg')
            suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
            thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
            t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen('[%s] %s: Thumbnail %sis already present' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
            else:
                self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
                try:
                    uf = self.urlopen(t['url'])
                    with open(thumb_filename, 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
                except (compat_urllib_error.URLError,
                        compat_http_client.HTTPException, socket.error) as err:
                    self.report_warning('Unable to download thumbnail "%s": %s' %
                                        (t['url'], compat_str(err)))
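
# Embedding sketch (assumed URL; the actual command-line entry point in
# youtube_dl/__init__.py builds the options dict from argv):
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
#         ydl.download(['https://example.com/some-video'])
#
# The context-manager form saves and restores the console title and, when a
# cookiefile is configured, persists cookies on exit.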
% ( info_dict['id'])) else: assert fixup_policy in ('ignore', 'never') try: self.post_process(filename, info_dict) except (PostProcessingError) as err: self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) def download(self, url_list): """Download a given list of URLs.""" outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) if (len(url_list) > 1 and '%' not in outtmpl and self.params.get('max_downloads') != 1): raise SameFileError(outtmpl) for url in url_list: try: # It also downloads the videos res = self.extract_info( url, force_generic_extractor=self.params.get('force_generic_extractor', False)) except UnavailableVideoError: self.report_error('unable to download video') except MaxDownloadsReached: self.to_screen('[info] Maximum number of downloaded files reached.') raise else: if self.params.get('dump_single_json', False): self.to_stdout(json.dumps(res)) return self._download_retcode def download_with_info_file(self, info_filename): with contextlib.closing(fileinput.FileInput( [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, 
default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '?x%d' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: res += ', %sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: tn_url = info_dict.get('thumbnail') if tn_url: thumbnails = [{'id': '0', 'url': tn_url}] else: self.to_screen( '[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( ['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one req_is_string = isinstance(req, 
compat_basestring) url = req if req_is_string else req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: if req_is_string: req = url_escaped else: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = compat_urllib_request.HTTPCookieProcessor( self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], compat_str(err)))
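# Minimal, network-free usage sketch for the static format_resolution()
# helper above; the format dicts are hypothetical examples, and the
# __main__ guard keeps normal imports of this module unaffected.
if __name__ == '__main__':
    for _fmt in ({'vcodec': 'none'},               # -> 'audio only'
                 {'width': 1280, 'height': 720},   # -> '1280x720'
                 {'height': 720},                  # -> '720p'
                 {'width': 1280}):                 # -> '?x1280'
        print(YoutubeDL.format_resolution(_fmt))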
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-8-fixed/youtube-dl/youtube_dl/YoutubeDL.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-8-buggy/youtube-dl/youtube_dl/YoutubeDL.py
youtube-dl-bug-27
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request( 'http:%s' % url if url.startswith('//') else url, *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass 
# Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). 
If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. """ def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. 
http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request new_req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) new_req.timeout = req.timeout req = new_req for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk at the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457).
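# Worked example (sketch): a redirect whose Location is 'http://host/vidéo.mp4'
# is rewritten by escape_url() below to 'http://host/vid%C3%A9o.mp4' before
# urllib follows it.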
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname return self.do_open(functools.partial( _create_http_connection, self, self._https_conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. # if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: m = re.search( r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str): date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d 
%b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') if upload_date is not None: return compat_str(upload_date) def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess elif guess.rstrip('/') in ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil'): return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation?
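# Examples (sketch): date_from_str('now-1week') yields today - 7 days, and
# 'now+2months' yields today + 60 days under the month ~= 30 days and
# year ~= 365 days approximation applied below.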
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
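# (The stream exposes fileno() but calling it raises io.UnsupportedOperation;
# report failure so write_string() falls back to its encoded-write paths.)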
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b"GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b"GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() 
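# Lock the whole file: the OVERLAPPED fields set below give the start offset
# of the byte range (0), while whole_low/whole_high passed to LockFileEx give
# its length.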
overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = _UNIT_TABLE[m.group('unit')] return int(float(num_str) * mult) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return
ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() m = re.match( r'''(?ix)(?:P?T)? (?: (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*| (?P<only_hours>[0-9.]+)\s*(?:hours?)| \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*| (?: (?: (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)? (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s* )? (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* )? (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)? 
)$''', s) if not m: return None res = 0 if m.group('only_mins'): return float_or_none(m.group('only_mins'), invscale=60) if m.group('only_hours'): return float_or_none(m.group('only_hours'), invscale=60 * 60) if m.group('secs'): res += int(m.group('secs')) if m.group('mins_reversed'): res += int(m.group('mins_reversed')) * 60 if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('hours_reversed'): res += int(m.group('hours_reversed')) * 60 * 60 if m.group('days'): res += int(m.group('days')) * 24 * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
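# Example (sketch): with a page size of 50, a page that yields only 37
# results is taken to be the last one, and no further pages are fetched.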
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack('!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') def encode_dict(d, encoding='utf-8'): def encode(v): return v.encode(encoding) if isinstance(v, compat_basestring) else v return dict((encode(k), encode(v)) for k, v in d.items()) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def parse_age_limit(s): if s is None: return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) return int(m.group('age')) if m else US_RATINGS.get(s, None) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v if v.startswith('"'): v = re.sub(r"\\'", "'", v[1:-1]) elif v.startswith("'"): v = v[1:-1] v = re.sub(r"\\\\|\\'|\"", lambda m: { '\\\\': '\\\\', "\\'": "'", '"': '\\"', }[m.group(0)], v) return '"%s"' % v res = re.sub(r'''(?x) "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"| 
'(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'| [a-zA-Z_][.a-zA-Z_0-9]* ''', fix_kv, code) res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res) return res def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns True if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(shlex_quote(a) for a in args) def mimetype2ext(mt): _, _, res = mt.rpartition('/') return { 'x-ms-wmv': 'wmv', 'x-mp4-fragmented': 'mp4', 'ttml+xml': 'ttml', }.get(res, res) def urlhandle_detect_ext(url_handle): try: url_handle.headers getheader = lambda h: url_handle.headers[h] except AttributeError: # Python < 3 getheader = url_handle.info().getheader cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes.
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3)) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', }) def parse_node(node): str_or_empty = functools.partial(str_or_none, default='') out = str_or_empty(node.text) for child in node: if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): out += '\n' + str_or_empty(child.tail) elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'): out += str_or_empty(parse_node(child)) else: out += str_or_empty(xml.etree.ElementTree.tostring(child)) return out dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 
'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 
'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, 
British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-1 alpha-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type)
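# ----------------------------------------------------------------------------
# Editor's usage sketches (added while editing; NOT part of the original
# youtube-dl module). The _example_* helpers below document, with hand-checked
# values, what the utilities defined above return for typical inputs. They are
# never called by the module itself and can be removed freely. They sit at the
# end of the file because the definitions they exercise appear throughout the
# module above.
# ----------------------------------------------------------------------------

def _example_durations_and_extensions():
    # parse_duration accepts both clock-style and unit-style expressions.
    assert parse_duration('1:23:45') == 5025
    assert parse_duration('3 min') == 180.0
    # The extension helpers splice around os.path.splitext instead of guessing.
    assert prepend_extension('video.mp4', 'temp') == 'video.temp.mp4'
    assert replace_extension('video.mp4', 'webm') == 'video.webm'


def _example_paged_lists():
    # Fake three-item "pages" stand in for a network-backed pagefunc; both
    # classes let callers slice without fetching every page.
    pagefunc = lambda n: range(3 * n, 3 * n + 3)
    assert OnDemandPagedList(pagefunc, 3).getslice(2, 7) == [2, 3, 4, 5, 6]
    assert InAdvancePagedList(pagefunc, 5, 3).getslice(2, 7) == [2, 3, 4, 5, 6]


def _example_js_to_json_and_friends():
    # js_to_json normalizes JavaScript-ish literals (bare keys, single quotes,
    # trailing commas) into strict JSON.
    assert js_to_json("{foo: 'bar',}") == '{"foo": "bar"}'
    # qualities() builds a comparator; unknown values sort lowest.
    order = qualities(['240p', '480p', '720p'])
    assert order('720p') > order('240p')
    assert order('4320p') == -1
    assert mimetype2ext('video/x-ms-wmv') == 'wmv'
    assert limit_length('youtube-dl', 9) == 'youtub...'
    assert is_outdated_version('2015.01.01', '2015.06.01')


def _example_match_str():
    # The filter mini-language of _match_one supports comparisons, existence
    # tests and '&'-joined clauses (cf. --match-filter).
    video = {'duration': 90, 'like_count': 500, 'title': 'demo'}
    assert match_str('duration > 60', video)
    assert match_str('like_count >= 100 & duration < 120', video)
    assert not match_str('!title', video)       # title IS present
    assert not match_str('view_count', video)   # key is missing


def _example_timecodes_cli_and_iso():
    # DFXP/SRT helpers round-trip a simple timestamp...
    assert parse_dfxp_time_expr('00:01:01.5') == 61.5
    assert srt_subtitles_timecode(61.5) == '00:01:01,500'
    # ...the cli_* builders turn option dicts into argv fragments...
    assert cli_option({'proxy': 'http://example.com'}, '--proxy', 'proxy') == \
        ['--proxy', 'http://example.com']
    assert cli_valueless_option({'quiet': True}, '--quiet', 'quiet') == ['--quiet']
    # ...and the ISO tables map language/country codes both ways.
    assert ISO639Utils.short2long('en') == 'eng'
    assert ISO639Utils.long2short('fra') == 'fr'
    assert ISO3166Utils.short2full('de') == 'Germany'

# Note: PerRequestProxyHandler (above) additionally honours a per-request
# 'Ytdl-request-proxy' header; the sentinel '__noproxy__' disables proxying
# for that single request.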
# In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) if val: assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') for f in node.findall(xpath): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') return node.find(xpath) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s*> (?P<content>.*?) 
</\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request( 'http:%s' % url if url.startswith('//') else url, *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. 
This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request new_req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) new_req.timeout = req.timeout req = new_req for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname return self.do_open(functools.partial( _create_http_connection, self, self._https_conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. # if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: m = re.search( r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str): date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d 
%b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') if upload_date is not None: return compat_str(upload_date) def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess elif guess.rstrip('/') in ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil'): return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad aproximation? 
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b"GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b"GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() 
overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and inofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = _UNIT_TABLE[m.group('unit')] return int(float(num_str) * mult) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return 
ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() m = re.match( r'''(?ix)(?:P?T)? (?: (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*| (?P<only_hours>[0-9.]+)\s*(?:hours?)| \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*| (?: (?: (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)? (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s* )? (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* )? (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)? 
)$''', s) if not m: return None res = 0 if m.group('only_mins'): return float_or_none(m.group('only_mins'), invscale=60) if m.group('only_hours'): return float_or_none(m.group('only_hours'), invscale=60 * 60) if m.group('secs'): res += int(m.group('secs')) if m.group('mins_reversed'): res += int(m.group('mins_reversed')) * 60 if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('hours_reversed'): res += int(m.group('hours_reversed')) * 60 * 60 if m.group('days'): res += int(m.group('days')) * 24 * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())


class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = list(self._pagefunc(pagenum))

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()

try:
    struct.pack('!I', 0)
except TypeError:
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
else:
    struct_pack = struct.pack
    struct_unpack = struct.unpack


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')


def encode_dict(d, encoding='utf-8'):
    def encode(v):
        return v.encode(encoding) if isinstance(v, compat_basestring) else v
    return dict((encode(k), encode(v)) for k, v in d.items())
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


def parse_age_limit(s):
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)


def strip_jsonp(code):
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)


def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)

        return '"%s"' % v

    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
        ''', fix_kv, code)
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
    return res
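
# Editor's note - js_to_json in action (expected outputs are assumptions based
# on fix_kv above; the function only targets these limited JS-isms):
#
#     >>> js_to_json("{'clip': {'provider': 'pseudo'}}")
#     '{"clip": {"provider": "pseudo"}}'
#     >>> js_to_json('{"abc": true,}')   # trailing comma dropped
#     '{"abc": true}'
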
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)


def dfxp2srt(dfxp_data):
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
    })

    def parse_node(node):
        str_or_empty = functools.partial(str_or_none, default='')

        out = str_or_empty(node.text)

        for child in node:
            if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                out += '\n' + str_or_empty(child.tail)
            elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
                out += str_or_empty(parse_node(child))
            else:
                out += str_or_empty(xml.etree.ElementTree.tostring(child))

        return out

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)


def cli_option(params, command_option, param):
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param,
                    true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(params, param, default=[]):
    ex_args = params.get(param)
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args
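
# Editor's note - how the cli_* helpers translate youtube-dl params into
# external-downloader arguments (illustrative; mirrors the unit tests):
#
#     >>> cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy')
#     ['--proxy', '127.0.0.1:3128']
#     >>> cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#     ['--no-check-certificate', 'true']
#     >>> cli_bool_option({'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
#     ['--check-certificate=true']
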
class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh',
        'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze',
        'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam',
        'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che',
        'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv',
        'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe',
        'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus',
        'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra',
        'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj',
        'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv',
        'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind',
        'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl',
        'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon',
        'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan',
        'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor',
        'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin',
        'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah',
        'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa',
        'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep',
        'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav',
        'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss',
        'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que',
        'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san',
        'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk',
        'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp',
        'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam',
        'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl',
        'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi',
        'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven',
        'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid',
        'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania',
        'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra',
        'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia',
        'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria',
        'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain',
        'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus',
        'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin',
        'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana',
        'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso',
        'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon',
        'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands',
        'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile',
        'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba',
        'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic',
        'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica',
        'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt',
        'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea',
        'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland',
        'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia',
        'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia',
        'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana',
        'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland',
        'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam',
        'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea',
        'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras',
        'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland',
        'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man',
        'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica',
        'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan',
        'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait',
        'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia',
        'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia',
        'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania',
        'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia',
        'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta',
        'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania',
        'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco',
        'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat',
        'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar',
        'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal',
        'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand',
        'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria',
        'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands',
        'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan',
        'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama',
        'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru',
        'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland',
        'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar',
        'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation',
        'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa',
        'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia',
        'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles',
        'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands',
        'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka',
        'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste',
        'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga',
        'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey',
        'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu',
        'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates',
        'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara',
        'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())
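
# Editor's note - quick lookups against the two tables above:
#
#     >>> ISO639Utils.short2long('en')    # 'eng'
#     >>> ISO639Utils.long2short('deu')   # 'de'
#     >>> ISO3166Utils.short2full('DE')   # 'Germany'
#     >>> ISO3166Utils.short2full('xx')   # None (unknown code)
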
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
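
# Editor's note - PerRequestProxyHandler honors an opt-in per-request override:
# a request carrying the youtube-dl-internal 'Ytdl-request-proxy' header uses
# that proxy instead of the handler-wide default, and the sentinel value
# '__noproxy__' disables proxying for that request. Hypothetical sketch
# (proxy URL is an assumption):
#
#     opener = compat_urllib_request.build_opener(
#         PerRequestProxyHandler({'http': 'http://proxy.example:3128'}))
#     req = compat_urllib_request.Request('http://example.com/')
#     req.add_header('Ytdl-request-proxy', '__noproxy__')  # bypass the proxy
#     opener.open(req)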
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-27-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-27-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-40
from __future__ import unicode_literals

import base64
import io
import itertools
import os
from struct import unpack, pack
import time
import xml.etree.ElementTree as etree

from .common import FileDownloader
from .http import HttpFD
from ..utils import (
    compat_urllib_request,
    compat_urlparse,
    format_bytes,
    encodeFilename,
    sanitize_open,
)


class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        return unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        return unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        return unpack('!B', self.read(1))[0]

    def read_string(self):
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size - header_end)

    def read_asrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()

        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))

        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()

        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()

        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            if duration == 0:
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })

        return {
            'fragments': fragments,
        }
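
    # Editor's note - a hand-checked sketch of the box layout read_box_info
    # expects (synthetic bytes, not from a real F4V file):
    #
    #     >>> r = FlvReader(b'\x00\x00\x00\x10' + b'mdat' + b'\x00' * 8)
    #     >>> r.read_box_info()
    #     (16, b'mdat', b'\x00\x00\x00\x00\x00\x00\x00\x00')
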
    def read_abst(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # BootstrapinfoVersion
        bootstrap_info_version = self.read_unsigned_int()
        # Profile,Live,Update,Reserved
        self.read(1)
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()
        # MovieIdentifier
        movie_identifier = self.read_string()
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()

        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())

        return {
            'segments': segments,
            'fragments': fragments,
        }

    def read_bootstrap_info(self):
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()


def read_bootstrap_info(bootstrap_bytes):
    return FlvReader(bootstrap_bytes).read_bootstrap_info()


def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    res = []
    segment_run_table = boot_info['segments'][0]
    # I've only found videos with one segment
    segment_run_entry = segment_run_table['segment_run'][0]
    n_frags = segment_run_entry[1]
    fragment_run_entry_table = boot_info['fragments'][0]['fragments']
    first_frag_number = fragment_run_entry_table[0]['first']
    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
        res.append((1, frag_number))
    return res


def write_flv_header(stream, metadata):
    """Writes the FLV header and the metadata to stream"""
    # FLV header
    stream.write(b'FLV\x01')
    stream.write(b'\x05')
    stream.write(b'\x00\x00\x00\x09')
    # FLV File body
    stream.write(b'\x00\x00\x00\x00')
    # FLVTAG
    # Script data
    stream.write(b'\x12')
    # Size of the metadata with 3 bytes
    stream.write(pack('!L', len(metadata))[1:])
    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
    stream.write(metadata)
    # Magic numbers extracted from the output files produced by AdobeHDS.php
    # (https://github.com/K-S-V/Scripts)
    stream.write(b'\x00\x00\x01\x73')


def _add_ns(prop):
    return '{http://ns.adobe.com/f4m/1.0}%s' % prop


class HttpQuietDownloader(HttpFD):
    def to_screen(self, *args, **kargs):
        pass


class F4mFD(FileDownloader):
    """
    A downloader for f4m manifests or AdobeHDS.
    """

    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        self.to_screen('[download] Downloading f4m manifest')
        manifest = self.ydl.urlopen(man_url).read()
        self.report_destination(filename)
        http_dl = HttpQuietDownloader(self.ydl,
                                      {'continuedl': True, 'quiet': True, 'noprogress': True})

        doc = etree.fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
        formats = sorted(formats, key=lambda f: f[0])
        rate, media = formats[-1]
        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
        metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
        boot_info = read_bootstrap_info(bootstrap)
        fragments_list = build_fragments_list(boot_info)
        total_frags = len(fragments_list)

        tmpfilename = self.temp_name(filename)
        (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
        write_flv_header(dest_stream, metadata)

        # This dict stores the download progress, it's updated by the progress
        # hook
        state = {
            'downloaded_bytes': 0,
            'frag_counter': 0,
        }
        start = time.time()

        def frag_progress_hook(status):
            frag_total_bytes = status.get('total_bytes', 0)
            estimated_size = (state['downloaded_bytes'] +
                              (total_frags - state['frag_counter']) * frag_total_bytes)
            if status['status'] == 'finished':
                state['downloaded_bytes'] += frag_total_bytes
                state['frag_counter'] += 1
                progress = self.calc_percent(state['frag_counter'], total_frags)
                byte_counter = state['downloaded_bytes']
            else:
                frag_downloaded_bytes = status['downloaded_bytes']
                byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                frag_progress = self.calc_percent(frag_downloaded_bytes, frag_total_bytes)
                progress = self.calc_percent(state['frag_counter'], total_frags)
                progress += frag_progress / float(total_frags)

            eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
            self.report_progress(progress, format_bytes(estimated_size),
                                 status.get('speed'), eta)
        http_dl.add_progress_hook(frag_progress_hook)

        frags_filenames = []
        for (seg_i, frag_i) in fragments_list:
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            url = base_url + name
            frag_filename = '%s-%s' % (tmpfilename, name)
            success = http_dl.download(frag_filename, {'url': url})
            if not success:
                return False
            with open(frag_filename, 'rb') as down:
                down_data = down.read()
                reader = FlvReader(down_data)
                while True:
                    _, box_type, box_data = reader.read_box_info()
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
            frags_filenames.append(frag_filename)

        self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)

        self.try_rename(tmpfilename, filename)
        for frag_file in frags_filenames:
            os.remove(frag_file)

        fsize = os.path.getsize(encodeFilename(filename))
        self._hook_progress({
            'downloaded_bytes': fsize,
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
        })

        return True


from __future__ import unicode_literals

import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree

from .common import FileDownloader
from .http import HttpFD
from ..utils import (
    struct_pack,
    struct_unpack,
    compat_urllib_request,
    compat_urlparse,
    format_bytes,
    encodeFilename,
    sanitize_open,
)


class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        return struct_unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        return struct_unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        return struct_unpack('!B', self.read(1))[0]

    def read_string(self):
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size - header_end)

    def read_asrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()

        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))

        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()

        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()

        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            if duration == 0:
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })

        return {
            'fragments': fragments,
        }
    def read_abst(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # BootstrapinfoVersion
        bootstrap_info_version = self.read_unsigned_int()
        # Profile,Live,Update,Reserved
        self.read(1)
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()
        # MovieIdentifier
        movie_identifier = self.read_string()
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()

        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())

        return {
            'segments': segments,
            'fragments': fragments,
        }

    def read_bootstrap_info(self):
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()


def read_bootstrap_info(bootstrap_bytes):
    return FlvReader(bootstrap_bytes).read_bootstrap_info()


def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    res = []
    segment_run_table = boot_info['segments'][0]
    # I've only found videos with one segment
    segment_run_entry = segment_run_table['segment_run'][0]
    n_frags = segment_run_entry[1]
    fragment_run_entry_table = boot_info['fragments'][0]['fragments']
    first_frag_number = fragment_run_entry_table[0]['first']
    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
        res.append((1, frag_number))
    return res


def write_flv_header(stream, metadata):
    """Writes the FLV header and the metadata to stream"""
    # FLV header
    stream.write(b'FLV\x01')
    stream.write(b'\x05')
    stream.write(b'\x00\x00\x00\x09')
    # FLV File body
    stream.write(b'\x00\x00\x00\x00')
    # FLVTAG
    # Script data
    stream.write(b'\x12')
    # Size of the metadata with 3 bytes
    stream.write(struct_pack('!L', len(metadata))[1:])
    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
    stream.write(metadata)
    # Magic numbers extracted from the output files produced by AdobeHDS.php
    # (https://github.com/K-S-V/Scripts)
    stream.write(b'\x00\x00\x01\x73')


def _add_ns(prop):
    return '{http://ns.adobe.com/f4m/1.0}%s' % prop


class HttpQuietDownloader(HttpFD):
    def to_screen(self, *args, **kargs):
        pass


class F4mFD(FileDownloader):
    """
    A downloader for f4m manifests or AdobeHDS.
    """
""" def real_download(self, filename, info_dict): man_url = info_dict['url'] self.to_screen('[download] Downloading f4m manifest') manifest = self.ydl.urlopen(man_url).read() self.report_destination(filename) http_dl = HttpQuietDownloader(self.ydl, {'continuedl': True, 'quiet': True, 'noprogress': True}) doc = etree.fromstring(manifest) formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))] formats = sorted(formats, key=lambda f: f[0]) rate, media = formats[-1] base_url = compat_urlparse.urljoin(man_url, media.attrib['url']) bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text) metadata = base64.b64decode(media.find(_add_ns('metadata')).text) boot_info = read_bootstrap_info(bootstrap) fragments_list = build_fragments_list(boot_info) total_frags = len(fragments_list) tmpfilename = self.temp_name(filename) (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb') write_flv_header(dest_stream, metadata) # This dict stores the download progress, it's updated by the progress # hook state = { 'downloaded_bytes': 0, 'frag_counter': 0, } start = time.time() def frag_progress_hook(status): frag_total_bytes = status.get('total_bytes', 0) estimated_size = (state['downloaded_bytes'] + (total_frags - state['frag_counter']) * frag_total_bytes) if status['status'] == 'finished': state['downloaded_bytes'] += frag_total_bytes state['frag_counter'] += 1 progress = self.calc_percent(state['frag_counter'], total_frags) byte_counter = state['downloaded_bytes'] else: frag_downloaded_bytes = status['downloaded_bytes'] byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes frag_progress = self.calc_percent(frag_downloaded_bytes, frag_total_bytes) progress = self.calc_percent(state['frag_counter'], total_frags) progress += frag_progress / float(total_frags) eta = self.calc_eta(start, time.time(), estimated_size, byte_counter) self.report_progress(progress, format_bytes(estimated_size), status.get('speed'), eta) http_dl.add_progress_hook(frag_progress_hook) frags_filenames = [] for (seg_i, frag_i) in fragments_list: name = 'Seg%d-Frag%d' % (seg_i, frag_i) url = base_url + name frag_filename = '%s-%s' % (tmpfilename, name) success = http_dl.download(frag_filename, {'url': url}) if not success: return False with open(frag_filename, 'rb') as down: down_data = down.read() reader = FlvReader(down_data) while True: _, box_type, box_data = reader.read_box_info() if box_type == b'mdat': dest_stream.write(box_data) break frags_filenames.append(frag_filename) self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start) self.try_rename(tmpfilename, filename) for frag_file in frags_filenames: os.remove(frag_file) fsize = os.path.getsize(encodeFilename(filename)) self._hook_progress({ 'downloaded_bytes': fsize, 'total_bytes': fsize, 'filename': filename, 'status': 'finished', }) return True
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-40-fixed/youtube-dl/youtube_dl/downloader/f4m.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-40-buggy/youtube-dl/youtube_dl/downloader/f4m.py
youtube-dl-bug-22
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_HTMLParser,
    compat_basestring,
    compat_chr,
    compat_etree_fromstring,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_kwargs,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_quote,
    compat_socket_create_connection,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))

DATE_FORMATS = (
    '%d %B %Y', '%d %b %Y', '%B %d %Y',
    '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y',
    '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y',
    '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M',
    '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d',
    '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f',
    '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y',
    '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y',
    '%m/%d/%y', '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    parser.feed(html_element)
    parser.close()
    return parser.attrs


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
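
# Editor's note - hedged examples for the HTML helpers above (outputs inferred
# from the regex/parser logic, not verified):
#
#     >>> get_element_by_class('foo', '<div class="a foo b">x</div>')
#     'x'
#     >>> extract_attributes('<el a="foo" empty= noval>')
#     {'a': 'foo', 'empty': '', 'noval': None}
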
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
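
# Editor's note - small sanity examples for the helpers above (values derived
# by hand from the code):
#
#     >>> unescapeHTML('&amp;&#39;&#x27;')   # "&''"
#     >>> formatSeconds(3661)                # '1:01:01'
#     >>> formatSeconds(61)                  # '1:01'
#     >>> formatSeconds(7)                   # '7'
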
def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg


class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        self.msg = msg


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(Exception): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(Exception): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
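        # (Sketch of the idea: a header such as "Set-Cookie: name=café" would be
        # percent-escaped, e.g. to 'name=caf%C3%A9' for UTF-8 bytes, so that
        # Python 2's cookie machinery never sees raw non-ASCII bytes. The
        # workaround itself is kept disabled below.)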
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use.
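    For example (illustrative):

        smuggled = smuggle_url('http://example.com/video', {'referrer': 'x'})
        # -> 'http://example.com/video#__youtubedl_smuggle=...'
        url, data = unsmuggle_url(smuggled)
        # -> ('http://example.com/video', {'referrer': 'x'})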
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if not isinstance(base, compat_str) or not re.match(r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
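    (Illustrative: check_executable('ffmpeg', ['-version']) returns 'ffmpeg'
    when an ffmpeg binary is on PATH, and False otherwise.)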
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
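            # (Illustrative: with a page size of 50 and startv == 0, a page
            # that yields only 37 results triggers the break below.)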
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'srt': 'srt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'vtt': 'vtt',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m': 'f4m',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
    }.get(res, res)


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in split_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(split_codecs) == 2:
            return {
                'vcodec': vcodec,
                'acodec': acodec,
            }
        elif len(split_codecs) == 1:
            return {
                'vcodec': 'none',
                'acodec': vcodec,
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes.
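    For example (illustrative), is_html(b'\xef\xbb\xbf <html>') is truthy:
    the UTF-8 BOM is stripped before the leading '<' is matched.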
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') or m.group('intval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 
'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 
'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United 
States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())


class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # youtube-dl's http/https handlers wrap the socket with SOCKS themselves
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)


def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(
            r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n


# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []
    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr =
chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. ' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, and can't find pyxattr, setfattr, or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. 
" "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.") #!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import io import itertools import json import locale import math import operator import os import pipes import platform import re import socket import ssl import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParser, compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_kwargs, compat_os_name, compat_parse_qs, compat_shlex_quote, compat_socket_create_connection, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_urlparse, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urlparse.uses_netloc: compat_urlparse.uses_netloc.append(scheme) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } USER_AGENTS = { 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] MONTH_NAMES = { 'en': ENGLISH_MONTH_NAMES, 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], } KNOWN_EXTENSIONS = ( 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 'flv', 'f4v', 'f4a', 'f4b', 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 'mkv', 'mka', 'mk3d', 'avi', 'divx', 'mov', 'asf', 'wmv', 'wma', '3gp', '3g2', 'mp3', 'flac', 'ape', 'wav', 'f4f', 'f4m', 'm3u8', 'smil') # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'], 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy'))) DATE_FORMATS = ( '%d %B %Y', '%d %b %Y', '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y 
%H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', '%b %d %Y at %H:%M', '%b %d %Y at %H:%M:%S', ) DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS) DATE_FORMATS_DAY_FIRST.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d.%m.%y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) DATE_FORMATS_MONTH_FIRST.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
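            # (Note, an aside rather than youtube-dl's own rationale: on Python 3.3+,
            # os.replace(tf.name, fn) would overwrite atomically without the unlink
            # step; the unlink+rename pair below is what works on Python 2.x too.)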
try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): for f in node.findall(compat_xpath(xpath)): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): return node.find(compat_xpath(xpath)) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute('id', id, html) def get_element_by_class(class_name, html): """Return the content of the first tag with the specified class in the passed HTML document""" retval = get_elements_by_class(class_name, html) return retval[0] if retval else None def get_element_by_attribute(attribute, value, html, escape_value=True): retval = get_elements_by_attribute(attribute, value, html, escape_value) return retval[0] if retval else None def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_by_attribute(attribute, value, html, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" value = re.escape(value) if escape_value else value retlist = [] for m in re.finditer(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*? \s*> (?P<content>.*?) 
</\1> ''' % (re.escape(attribute), value), html): res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] retlist.append(unescapeHTML(res)) return retlist class HTMLAttributeParser(compat_HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} compat_HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() parser.feed(html_element) parser.close() return parser.attrs def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] if char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of # unwanted failures due to missing protocol def sanitize_url(url): return 'http:%s' % url if url.startswith('//') else url def sanitized_Request(url, *args, **kwargs): return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. For example, # '&Eacuteric' should be decoded as 'Éric'. 
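    # (Note: the html5 entity table keeps the trailing ';' in its keys, e.g.
    # 'eacute;', which is why the raw match including its ';' is looked up
    # below before falling back to the numeric '#...' forms.)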
if entity_with_semicolon in compat_html_entities_html5: return compat_html_entities_html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/rg3/youtube-dl/issues/7518 try: return compat_chr(int(numstr, base)) except ValueError: pass # Unknown entity in name, return its literal representation return '&%s;' % entity def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return encoding def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible if sys.platform.startswith('java'): return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; 
please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected class XAttrMetadataError(Exception): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code self.msg = msg # Parsing code and msg if (self.code in (errno.ENOSPC, errno.EDQUOT) or 'No space left' in self.msg or 'Disk quota excedded' in self.msg): self.reason = 'NO_SPACE' elif self.code == errno.E2BIG or 'Argument list too long' in self.msg: self.reason = 'VALUE_TOO_LONG' else: self.reason = 'NOT_SUPPORTED' class XAttrUnavailableError(Exception): pass def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc def handle_youtubedl_headers(headers): filtered_headers = headers if 'Youtubedl-no-compression' in filtered_headers: filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') del filtered_headers['Youtubedl-no-compression'] return filtered_headers class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): conn_class = compat_http_client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req = update_Request(req, url=url_escaped) for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') else: location = location.decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] if sys.version_info < (3, 0): location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) url_components = compat_urlparse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): socks_type = ProxyType.SOCKS4 elif url_components.scheme.lower() == 'socks4a': socks_type = ProxyType.SOCKS4A def unquote_if_non_empty(s): if not s: return s return compat_urllib_parse_unquote_plus(s) proxy_args = ( socks_type, url_components.hostname, url_components.port or 1080, True, # Remote DNS unquote_if_non_empty(url_components.username), unquote_if_non_empty(url_components.password), ) class SocksConnection(base_class): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) if type(self.timeout) in (int, float): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) if isinstance(self, compat_http_client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(self.sock) return SocksConnection class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} conn_class = self._https_conn_class if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] return self.do_open(functools.partial( _create_http_connection, self, conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. 
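        # (Note: that workaround is kept commented out below; re-enabling it
        # would percent-encode the Set-Cookie header on Python 2 as described above.)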
# if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) return timezone, date_str def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def date_formats(day_first=True): return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: try: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') except ValueError: pass if upload_date is not None: return compat_str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None date_str = date_str.replace(',', ' ') pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) for expression in date_formats(day_first): try: dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) except ValueError: pass timetuple = email.utils.parsedate_tz(date_str) if timetuple: return calendar.timegm(timetuple) + pm_delta * 3600 def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download elif guess.rstrip('/') in KNOWN_EXTENSIONS: return guess.rstrip('/') else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' 
+ sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, '%Y%m%d').date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
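            # (Note: io.UnsupportedOperation is raised by file-like objects with no
            # real descriptor, e.g. some in-memory or IDE-provided streams; they are
            # treated as non-console output here.)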
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b'GetStdHandle', ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b'GetConsoleMode', ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return compat_struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = 
OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: # Some platforms, such as Jython, is missing fcntl try: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) except ImportError: UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive): raise IOError(UNSUPPORTED_MSG) def _unlock_file(f): raise IOError(UNSUPPORTED_MSG) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
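    Illustrative example (made-up values): smuggle_url('http://example.com/v', {'referer': 'r'})
    returns 'http://example.com/v#__youtubedl_smuggle=...', and unsmuggle_url() below
    recovers the clean URL together with the data dict.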
""" url, idata = unsmuggle_url(url, {}) data.update(idata) sdata = compat_urllib_parse_urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def lookup_unit_table(unit_table, s): units_re = '|'.join(re.escape(u) for u in unit_table) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = unit_table[m.group('unit')] return int(float(num_str) * mult) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'bytes': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000, 'kilobytes': 1000, 'kibibytes': 1024, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2, 'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3, 'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4, 'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5, 'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6, 'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7, 'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8, 'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8, } return lookup_unit_table(_UNIT_TABLE, s) def parse_count(s): if s is None: return None s = s.strip() if re.match(r'^[\d,.]+$', s): return str_to_int(s) _UNIT_TABLE = { 'k': 1000, 'K': 1000, 'm': 1000 ** 2, 'M': 1000 ** 2, 'kk': 1000 ** 2, 'KK': 1000 ** 2, } return lookup_unit_table(_UNIT_TABLE, s) def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) # ctypes in Jython is not complete # http://bugs.jython.org/issue2148 if sys.platform.startswith('java'): return try: libc = ctypes.cdll.LoadLibrary('libc.so.6') except OSError: return except TypeError: # LoadLibrary in Windows Python 2.7.13 only expects # a bytestring, 
but since unicode_literals turns # every string into a unicode string, it fails. return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): return s[len(start):] if s is not None and s.startswith(start) else s def remove_end(s, end): return s[:-len(end)] if s is not None and s.endswith(end) else s def remove_quotes(s): if s is None or len(s) < 2: return s for quote in ('"', "'", ): if s[0] == quote and s[-1] == quote: return s[1:-1] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): return re.match(r'https?://[^?#&]+/', url).group() def urljoin(base, path): if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path if not isinstance(base, compat_str) or not re.match(r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) class HEADRequest(compat_urllib_request.Request): def get_method(self): return 'HEAD' class PUTRequest(compat_urllib_request.Request): def get_method(self): return 'PUT' def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def strip_or_none(v): return None if v is None else v.strip() def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() days, hours, mins, secs, ms = [None] * 5 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match( r'''(?ix)(?:P?T)? (?: (?P<days>[0-9]+)\s*d(?:ays?)?\s* )? (?: (?P<hours>[0-9]+)\s*h(?:ours?)?\s* )? (?: (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s* )? (?: (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s* )?Z?$''', s) if m: days, hours, mins, secs, ms = m.groups() else: m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s) if m: hours, mins = m.groups() else: return None duration = 0 if secs: duration += float(secs) if mins: duration += float(mins) * 60 if hours: duration += float(hours) * 60 * 60 if days: duration += float(days) * 24 * 60 * 60 if ms: duration += float(ms) return duration def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. 
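    Returns False when the executable cannot be run; an illustrative call:
    check_executable('ffmpeg', ['-version']) -> 'ffmpeg' on a system with ffmpeg on PATH.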
args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if youtube-dl is run in the background. # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656 out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize, use_cache=False): self._pagefunc = pagefunc self._pagesize = pagesize self._use_cache = use_cache if use_cache: self._cache = {} def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = None if self._use_cache: page_results = self._cache.get(pagenum) if page_results is None: page_results = list(self._pagefunc(pagenum)) if self._use_cache: self._cache[pagenum] = page_results startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
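                # (This early exit assumes pages are full everywhere except possibly
                # the last one; a source returning a short page mid-stream would end
                # iteration early.)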
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url parsed_url = compat_urlparse.urlparse(url) qs = compat_parse_qs(parsed_url.query) qs.update(query) return compat_urlparse.urlunparse(parsed_url._replace( query=compat_urllib_parse_urlencode(qs, True))) def update_Request(req, url=None, data=None, headers={}, query={}): req_headers = req.headers.copy() req_headers.update(headers) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() if req_get_method == 'HEAD': req_type = HEADRequest elif req_get_method == 'PUT': req_type = PUTRequest else: req_type = compat_urllib_request.Request new_req = req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) if hasattr(req, 'timeout'): new_req.timeout = req.timeout return new_req def dict_get(d, key_or_keys, default=None, skip_false_values=True): if isinstance(key_or_keys, (list, tuple)): for key in key_or_keys: if key not in d or d[key] is None or skip_false_values and not d[key]: continue return d[key] return default return d.get(key_or_keys, default) def try_get(src, getter, expected_type=None): try: v = getter(src) except (AttributeError, KeyError, 
TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } TV_PARENTAL_GUIDELINES = { 'TV-Y': 0, 'TV-Y7': 7, 'TV-G': 0, 'TV-PG': 0, 'TV-14': 14, 'TV-MA': 17, } def parse_age_limit(s): if type(s) == int: return s if 0 <= s <= 21 else None if not isinstance(s, compat_basestring): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: return int(m.group('age')) if s in US_RATINGS: return US_RATINGS[s] return TV_PARENTAL_GUIDELINES.get(s) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*' SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) INTEGER_TABLE = ( (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), ) def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v elif v.startswith('/*') or v.startswith('//') or v == ',': return "" if v[0] in ("'", '"'): v = re.sub(r'(?s)\\.|"', lambda m: { '"': '\\"', "\\'": "'", '\\\n': '', '\\x': '\\u00', }.get(m.group(0), m.group(0)), v[1:-1]) for regex, base in INTEGER_TABLE: im = re.match(regex, v) if im: i = int(im.group(1), base) return '"%d":' % i if v.endswith(':') else '%d' % i return '"%s"' % v return re.sub(r'''(?sx) "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| {comment}|,(?={skip}[\]}}])| [a-zA-Z_][.a-zA-Z_0-9]*| \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| [0-9]+(?={skip}:) '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(compat_shlex_quote(a) for a in args) def error_to_compat_str(err): err_str = str(err) # On python 2 error byte string must be decoded with proper # encoding rather than ascii if sys.version_info[0] < 3: err_str = err_str.decode(preferredencoding()) return err_str def mimetype2ext(mt): if mt is None: return None ext = { 'audio/mp4': 'm4a', # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. 
# Here use .mp3 as it's the most popular one 'audio/mpeg': 'mp3', }.get(mt) if ext is not None: return ext _, _, res = mt.rpartition('/') res = res.split(';')[0].strip().lower() return { '3gpp': '3gp', 'smptett+xml': 'tt', 'srt': 'srt', 'ttaf+xml': 'dfxp', 'ttml+xml': 'ttml', 'vtt': 'vtt', 'x-flv': 'flv', 'x-mp4-fragmented': 'mp4', 'x-ms-wmv': 'wmv', 'mpegurl': 'm3u8', 'x-mpegurl': 'm3u8', 'vnd.apple.mpegurl': 'm3u8', 'dash+xml': 'mpd', 'f4m': 'f4m', 'f4m+xml': 'f4m', 'hds+xml': 'f4m', 'vnd.ms-sstr+xml': 'ism', 'quicktime': 'mov', }.get(res, res) def parse_codecs(codecs_str): # http://tools.ietf.org/html/rfc6381 if not codecs_str: return {} split_codecs = list(filter(None, map( lambda s: s.strip(), codecs_str.strip().strip(',').split(',')))) vcodec, acodec = None, None for full_codec in split_codecs: codec = full_codec.split('.')[0] if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'): if not vcodec: vcodec = full_codec elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'): if not acodec: acodec = full_codec else: write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr) if not vcodec and not acodec: if len(split_codecs) == 2: return { 'vcodec': vcodec, 'acodec': acodec, } elif len(split_codecs) == 1: return { 'vcodec': 'none', 'acodec': vcodec, } else: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', } return {} def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes.
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) if (m.group('quotedstrval') is not None or m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see # https://github.com/rg3/youtube-dl/issues/11082). actual_value is not None and m.group('intval') is not None and isinstance(actual_value, compat_str)): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') quote = m.group('quote') if quote is not None: comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. 
Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' % (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1', }) class TTMLPElementParser(object): out = '' def start(self, tag, attrib): if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): self.out += '\n' def end(self, tag): pass def data(self, data): self.out += data def close(self): return self.out.strip() def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) if param: param = compat_str(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 
'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 
'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United 
Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # youtube-dl's http/https handlers do the wrapping of the socket with SOCKS themselves return None return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type) def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' import binascii # hexlify is needed below but binascii is missing from this module's top-level imports payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: table = FULL_TABLE[:n] if n > len(table): raise ValueError('base %d exceeds table length %d' % (n, len(table))) if num == 0: return table[0] ret = '' while num: ret = table[num % n] + ret num = num // n return ret def decode_packed_codes(code): mobj = re.search(PACKED_CODES_RE, code) obfuscated_code, base, count, symbols = mobj.groups() base = int(base) count = int(count) symbols = symbols.split('|') symbol_table = {} while count: count -= 1 base_n_count = encode_base_n(count, base) symbol_table[base_n_count] = symbols[count] or base_n_count return re.sub( r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)], obfuscated_code) def parse_m3u8_attributes(attrib): info = {} for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib): if val.startswith('"'): val = val[1:-1] info[key] = val return info def urshift(val, n): return val >> n if val >= 0 else (val + 0x100000000) >> n # Based on png2str() written by @gdkchan and improved by @yokrysty # Originally posted at https://github.com/rg3/youtube-dl/issues/9706 def decode_png(png_data): # Reference: https://www.w3.org/TR/PNG/ header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': raise IOError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'}
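# The chunk walk below relies on the PNG container layout (see the W3C
# reference above): after the 8-byte signature, every chunk is stored as
#   4 bytes: big-endian length of the data field
#   4 bytes: chunk type (b'IHDR', b'IDAT', b'IEND', ...)
#   length bytes: chunk data
#   4 bytes: CRC (skipped below; the data is not integrity-checked)
# int_map maps a field width to the matching big-endian struct format, so
# unpack_integer can decode those integer fields; for instance
# struct.unpack('>I', b'\x00\x00\x00\x0d')[0] == 13, the fixed data length
# of every IHDR chunk.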
unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] chunks = [] while header: length = unpack_integer(header[:4]) header = header[4:] chunk_type = header[:4] header = header[4:] chunk_data = header[:length] header = header[length:] header = header[4:] # Skip CRC chunks.append({ 'type': chunk_type, 'length': length, 'data': chunk_data }) ihdr = chunks[0]['data'] width = unpack_integer(ihdr[:4]) height = unpack_integer(ihdr[4:8]) idat = b'' for chunk in chunks: if chunk['type'] == b'IDAT': idat += chunk['data'] if not idat: raise IOError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) stride = width * 3 pixels = [] def _get_pixel(idx): x = idx % stride y = idx // stride return pixels[y][x] for y in range(height): basePos = y * (1 + stride) filter_type = decompressed_data[basePos] current_row = [] pixels.append(current_row) for x in range(stride): color = decompressed_data[1 + basePos + x] basex = y * stride + x left = 0 up = 0 if x > 2: left = _get_pixel(basex - 3) if y > 0: up = _get_pixel(basex - stride) if filter_type == 1: # Sub color = (color + left) & 0xff elif filter_type == 2: # Up color = (color + up) & 0xff elif filter_type == 3: # Average color = (color + ((left + up) >> 1)) & 0xff elif filter_type == 4: # Paeth a = left b = up c = 0 if x > 2 and y > 0: c = _get_pixel(basex - stride - 3) p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: color = (color + a) & 0xff elif pb <= pc: color = (color + b) & 0xff else: color = (color + c) & 0xff current_row.append(color) return width, height, pixels def write_xattr(path, key, value): # This mess below finds the best xattr tool for the job try: # try the pyxattr module... import xattr if hasattr(xattr, 'set'): # pyxattr # Unicode arguments are not supported in python-pyxattr until # version 0.5.0 # See https://github.com/rg3/youtube-dl/issues/5498 pyxattr_required_version = '0.5.0' if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): # TODO: fallback to CLI tools raise XAttrUnavailableError( 'python-pyxattr is detected but is too old. ' 'youtube-dl requires %s or above while your version is %s. 
' 'Falling back to other xattr implementations' % ( pyxattr_required_version, xattr.__version__)) setxattr = xattr.set else: # xattr setxattr = xattr.setxattr try: setxattr(path, key, value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) except ImportError: if compat_os_name == 'nt': # Write xattrs to NTFS Alternate Data Streams: # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 assert ':' not in key assert os.path.exists(path) ads_fn = path + ':' + key try: with open(ads_fn, 'wb') as f: f.write(value) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) else: user_has_setfattr = check_executable('setfattr', ['--version']) user_has_xattr = check_executable('xattr', ['-h']) if user_has_setfattr or user_has_xattr: value = value.decode('utf-8') if user_has_setfattr: executable = 'setfattr' opts = ['-n', key, '-v', value] elif user_has_xattr: executable = 'xattr' opts = ['-w', key, value] cmd = ([encodeFilename(executable, True)] + [encodeArgument(o) for o in opts] + [encodeFilename(path, True)]) try: p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) except EnvironmentError as e: raise XAttrMetadataError(e.errno, e.strerror) stdout, stderr = p.communicate() stderr = stderr.decode('utf-8', 'replace') if p.returncode != 0: raise XAttrMetadataError(p.returncode, stderr) else: # On Unix, but we could not find pyxattr, setfattr or xattr. if sys.platform.startswith('linux'): raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'pyxattr' or 'xattr' " "modules, or the GNU 'attr' package " "(which contains the 'setfattr' tool).") else: raise XAttrUnavailableError( "Couldn't find a tool to set the xattrs. " "Install either the python 'xattr' module, " "or the 'xattr' binary.")
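# A minimal usage sketch for write_xattr (illustrative only: the path, key
# and value below are made up, and which backend actually runs depends on
# the system -- the pyxattr/xattr Python modules, NTFS ADS on Windows, or
# the setfattr/xattr command-line tools):
#
#     try:
#         # On Linux, unprivileged callers must use the 'user.' namespace
#         write_xattr('/tmp/video.mp4', 'user.ytdl.note', b'some bytes')
#     except XAttrUnavailableError:
#         pass  # no usable backend installed on this system
#     except XAttrMetadataError:
#         pass  # a backend was found, but the write itself failed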
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-22-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-22-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-34
#!/usr/bin/env python # -*- coding: utf-8 -*- import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import getpass import gzip import itertools import io import json import locale import math import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import unquote as compat_urllib_parse_unquote except ImportError: def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. 
# Python 2's version is apparently totally broken def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = compat_urllib_parse_unquote( name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = compat_urllib_parse_unquote( value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr try: from xml.etree.ElementTree import ParseError as compat_xml_parse_error except ImportError: # Python 2.6 from xml.parsers.expat import ExpatError as compat_xml_parse_error try: from shlex import quote as shlex_quote except ImportError: # Python < 3.3 def shlex_quote(s): return "'" + s.replace("'", "'\"'\"'") + "'" def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically """ args = { 'suffix': '.tmp', 'prefix': os.path.basename(fn) + '.', 'dir': os.path.dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. 
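# (Concretely: on Python 2, json.dump(obj, open(fn, 'wb')) works because the
# serializer emits ASCII-escaped bytes, while on Python 3 json.dump produces
# str and would raise TypeError on a binary file -- hence the mode switch
# below.)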
# In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**args) try: with tf: json.dump(obj, tf) os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z-]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! if isinstance(xpath, unicode): xpath = xpath.encode('ascii') for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_text(node, xpath, name=None, fatal=False): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') n = node.find(xpath) if n is None: if fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n.text compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class BaseHTMLParser(compat_html_parser.HTMLParser): def __init__(self): compat_html_parser.HTMLParser.__init__(self) self.html = None def loads(self, html): self.html = html self.feed(html) self.close() class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute self.value = value self.result = None self.started = False self.depth = {} self.watch_startpos = False self.error_count = 0 BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1 self.goahead(1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value: self.result = [tag] self.started = True self.watch_startpos = True if self.started: if tag not in self.depth: self.depth[tag] = 0 self.depth[tag] += 1 def handle_endtag(self, tag): if self.started: if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0: self.started = False self.result.append(self.getpos()) def find_startpos(self, x): """Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id""" if self.watch_startpos: self.watch_startpos = False self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos def get_result(self): if self.result is None: return None if len(self.result) != 3: return None lines = self.html.split('\n') lines = 
lines[self.result[1][0]-1:self.result[2][0]] lines[0] = lines[0][self.result[1][1]:] if len(lines) == 1: lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] lines[-1] = lines[-1][:self.result[2][1]] return '\n'.join(lines).strip() # Hack for https://github.com/rg3/youtube-dl/issues/662 if sys.version_info < (2, 7, 3): AttrParser.parse_endtag = (lambda self, i: i + len("</scr'+'ipt>") if self.rawdata[i:].startswith("</scr'+'ipt>") else compat_html_parser.HTMLParser.parse_endtag(self, i)) def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" parser = AttrParser(attribute, value) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() class MetaParser(BaseHTMLParser): """ Modified HTMLParser that isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. 
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x?[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith(u'x'): base = 16 numstr = u'0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return (u'&%s;' % entity) def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
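# (That is: on those Windows versions a unicode path can be handed straight
# to the OS APIs, *except* when it will become a subprocess argument, which
# is byte-based on Python 2 -- so that case falls through to the locale
# encoding below.)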
if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors #assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate, **kwargs): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3(**kwargs) elif hasattr(ssl, 'create_default_context'): # Python >= 3.4 context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) context.options &= ~ssl.OP_NO_SSLv3 # Allow older, not-as-secure SSLv3 if opts_no_check_certificate: context.verify_mode = ssl.CERT_NONE return compat_urllib_request.HTTPSHandler(context=context, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' 
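# (Typical call sites, for illustration only -- the messages are made up:
#     raise ExtractorError('This video is private', expected=True)
# suppresses the bug-report boilerplate appended above, whereas
#     raise ExtractorError('Unable to extract player config')
# keeps it, since an unexpected failure usually indicates an extractor bug.)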
super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h, v in std_headers.items(): if h not in req.headers: req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def parse_iso8601(date_str, delimiter='T'): """ Return a UNIX timestamp from the given date """ if date_str is None: return None m = re.search( r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None #Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y-%m-%d', '%Y/%m/%d', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%Y/%m/%d %H:%M:%S', '%d/%m/%Y %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] for 
expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext=u'unknown_video'): if url is None: return default_ext guess = url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now' or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s": the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( ("GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)(("WriteConsoleW", ctypes.windll.kernel32)) written = 
ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(("GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( ("GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = 
msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but include the latest evaluated element (the first element so that Not pred(e)) """ for e in seq: yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url, default=None): if not '#__youtubedl_smuggle' in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', u'&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return 
path.strip(u'/').split(u'/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None return default if v is None else (int(v) * invscale // scale) def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', u'', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): return default if v is None else (float(v) * invscale / scale) def parse_duration(s): if s is None: return None s = s.strip() m = re.match( r'(?i)(?:(?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$', s) if not m: return None res = int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return u'{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, unicode): s = s.encode('utf-8') return compat_urllib_parse.quote(s, "%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack(u'!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = u'\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') try: etree_iter = xml.etree.ElementTree.Element.iter except AttributeError: # Python <=2.6 etree_iter = lambda n: n.findall('.//*') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) # Fix up XML parser in Python 2.x if sys.version_info < (3, 0): for n in etree_iter(tree): if n.text is not None: if not isinstance(n.text, compat_str): n.text = n.text.decode('utf-8') return tree if sys.version_info < (3, 0) and sys.platform == 'win32': def compat_getpass(prompt, *args, **kwargs): if isinstance(prompt, compat_str): prompt = prompt.encode(preferredencoding()) return getpass.getpass(prompt, *args, **kwargs) else: compat_getpass = getpass.getpass US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def strip_jsonp(code): return 
re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code) def js_to_json(code): def fix_kv(m): key = m.group(2) if key.startswith("'"): assert key.endswith("'") assert '"' not in key key = '"%s"' % key[1:-1] elif not key.startswith('"'): key = '"%s"' % key value = m.group(4) if value.startswith("'"): assert value.endswith("'") assert '"' not in value value = '"%s"' % value[1:-1] return m.group(1) + key + m.group(3) + value res = re.sub(r'''(?x) ([{,]\s*) ("[^"]*"|\'[^\']*\'|[a-z0-9A-Z]+) (:\s*) ([0-9.]+|true|false|"[^"]*"|\'[^\']*\'|\[|\{) ''', fix_kv, code) res = re.sub(r',(\s*\])', lambda m: m.group(1), res) return res def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' try: subprocess_check_output = subprocess.check_output except AttributeError: def subprocess_check_output(*args, **kwargs): assert 'input' not in kwargs p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs) output, _ = p.communicate() ret = p.poll() if ret: raise subprocess.CalledProcessError(ret, p.args, output=output) return output def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s #!/usr/bin/env python # -*- coding: utf-8 -*- import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import getpass import gzip import itertools import io import json import locale import math import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client except ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: from urllib.parse import unquote as compat_urllib_parse_unquote except ImportError: def compat_urllib_parse_unquote(string, encoding='utf-8', 
errors='replace'): if string == '': return string res = string.split('%') if len(res) == 1: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' # pct_sequence: contiguous sequence of percent-encoded bytes, decoded pct_sequence = b'' string = res[0] for item in res[1:]: try: if not item: raise ValueError pct_sequence += item[:2].decode('hex') rest = item[2:] if not rest: # This segment was just a single percent-encoded character. # May be part of a sequence of code units, so delay decoding. # (Stored in pct_sequence). continue except ValueError: rest = '%' + item # Encountered non-percent-encoded characters. Flush the current # pct_sequence. string += pct_sequence.decode(encoding, errors) + rest pct_sequence = b'' if pct_sequence: # Flush the final pct_sequence string += pct_sequence.decode(encoding, errors) return string try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = compat_urllib_parse_unquote( name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = compat_urllib_parse_unquote( value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: compat_str = unicode # Python 2 except NameError: compat_str = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr try: from xml.etree.ElementTree import ParseError as compat_xml_parse_error except ImportError: # Python 2.6 from xml.parsers.expat import ExpatError as compat_xml_parse_error try: from shlex import quote as shlex_quote except ImportError: # Python < 3.3 def shlex_quote(s): return "'" + s.replace("'", "'\"'\"'") + "'" def compat_ord(c): if type(c) is int: return c else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. 
""" try: pref = locale.getpreferredencoding() u'TEST'.encode(pref) except: pref = 'UTF-8' return pref if sys.version_info < (3,0): def compat_print(s): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert type(s) == type(u'') print(s) def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically """ args = { 'suffix': '.tmp', 'prefix': os.path.basename(fn) + '.', 'dir': os.path.dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**args) try: with tf: json.dump(obj, tf) os.rename(tf.name, fn) except: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z-]+$', key) assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! if isinstance(xpath, unicode): xpath = xpath.encode('ascii') for f in node.findall(xpath): if f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_text(node, xpath, name=None, fatal=False): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') n = node.find(xpath) if n is None: if fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n.text compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class BaseHTMLParser(compat_html_parser.HTMLParser): def __init(self): compat_html_parser.HTMLParser.__init__(self) self.html = None def loads(self, html): self.html = html self.feed(html) self.close() class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute self.value = value self.result = None self.started = False self.depth = {} self.watch_startpos = False self.error_count = 0 BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1 self.goahead(1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value: self.result = [tag] self.started = True self.watch_startpos = True if self.started: if not tag in self.depth: self.depth[tag] = 0 self.depth[tag] += 1 def handle_endtag(self, tag): if self.started: if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0: self.started = False self.result.append(self.getpos()) 
def find_startpos(self, x): """Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id""" if self.watch_startpos: self.watch_startpos = False self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos def get_result(self): if self.result is None: return None if len(self.result) != 3: return None lines = self.html.split('\n') lines = lines[self.result[1][0]-1:self.result[2][0]] lines[0] = lines[0][self.result[1][1]:] if len(lines) == 1: lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] lines[-1] = lines[-1][:self.result[2][1]] return '\n'.join(lines).strip() # Hack for https://github.com/rg3/youtube-dl/issues/662 if sys.version_info < (2, 7, 3): AttrParser.parse_endtag = (lambda self, i: i + len("</scr'+'ipt>") if self.rawdata[i:].startswith("</scr'+'ipt>") else compat_html_parser.HTMLParser.parse_endtag(self, i)) def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" parser = AttrParser(attribute, value) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() class MetaParser(BaseHTMLParser): """ Modified HTMLParser that isolates a meta tag with the specified name attribute. """ def __init__(self, name): BaseHTMLParser.__init__(self) self.name = name self.content = None self.result = None def handle_starttag(self, tag, attrs): if tag != 'meta': return attrs = dict(attrs) if attrs.get('name') == self.name: self.result = attrs.get('content') def get_result(self): return self.result def get_meta_content(name, html): """ Return the content attribute from the meta tag with the given name attribute. """ parser = MetaParser(name) try: parser.loads(html) except compat_html_parser.HTMLParseError: pass return parser.get_result() def clean_html(html): """Clean an HTML snippet into a readable string""" # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). 
""" try: if filename == u'-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) for path_part in os.path.split(filename) ) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char result = u''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if not result: result = '_' return result def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x?[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith(u'x'): base = 16 numstr = u'0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return (u'&%s;' % entity) def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def encodeFilename(s, for_subprocess=False): """ @param s The name of the file """ assert type(s) == compat_str # Python 3 has a Unicode API if sys.version_info >= (3, 0): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # Pass u'' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
if not for_subprocess: return s else: # For subprocess calls, encode with locale encoding # Refer to http://stackoverflow.com/a/9951851/35070 encoding = preferredencoding() else: encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' return s.encode(encoding, 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors #assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(opts_no_check_certificate, **kwargs): if sys.version_info < (3, 2): import httplib class HTTPSConnectionV3(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() try: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3(**kwargs) elif hasattr(ssl, 'create_default_context'): # Python >= 3.4 context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) context.options &= ~ssl.OP_NO_SSLv3 # Allow older, not-as-secure SSLv3 if opts_no_check_certificate: context.verify_mode = ssl.CERT_NONE return compat_urllib_request.HTTPSHandler(context=context, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() try: context.load_default_certs() except AttributeError: pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context, **kwargs) class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += u' (caused by %r)' % cause if not expected: msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' 
super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return u''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ # Both in bytes downloaded = None expected = None def __init__(self, downloaded, expected): self.downloaded = downloaded self.expected = expected class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): for h, v in std_headers.items(): if h not in req.headers: req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if 'Youtubedl-user-agent' in req.headers: if 'User-agent' in req.headers: del req.headers['User-agent'] req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] del req.headers['Youtubedl-user-agent'] if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg return resp https_request = http_request https_response = http_response def parse_iso8601(date_str, delimiter='T'): """ Return a UNIX timestamp from the given date """ if date_str is None: return None m = re.search( r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) def unified_strdate(date_str): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None #Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) format_expressions = [ '%d %B %Y', '%d %b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y-%m-%d', '%Y/%m/%d', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%Y/%m/%d %H:%M:%S', '%d/%m/%Y %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] for 
expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') return upload_date def determine_ext(url, default_ext=u'unknown_video'): if url is None: return default_ext guess = url.partition(u'?')[0].rpartition(u'.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str == 'now'or date_str == 'today': return today match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') #A bad aproximation? if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day,day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( ("GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)(("WriteConsoleW", ctypes.windll.kernel32)) written = 
ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(("GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( ("GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' if isinstance(chr(0), bytes): # Python 2 return ''.join([chr(x) for x in xs]) else: return bytes(xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = 
msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def shell_quote(args): quoted_args = [] encoding = sys.getfilesystemencoding() if encoding is None: encoding = 'utf-8' for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return u' '.join(quoted_args) def takewhile_inclusive(pred, seq): """ Like itertools.takewhile, but include the latest evaluated element (the first element so that Not pred(e)) """ for e in seq: yield e if not pred(e): return def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {u'__youtubedl_smuggle': json.dumps(data)}) return url + u'#' + sdata def unsmuggle_url(smug_url, default=None): if not '#__youtubedl_smuggle' in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition(u'#') jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return u'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return u'%.2f%s' % (converted, suffix) def get_term_width(): columns = os.environ.get('COLUMNS', None) if columns: return int(columns) try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() return int(out.split()[1]) except: pass return None def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ u'January', u'February', u'March', u'April', u'May', u'June', u'July', u'August', u'September', u'October', u'November', u'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', u'&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return 
path.strip(u'/').split(u'/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None return default if v is None else (int(v) * invscale // scale) def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', u'', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): return default if v is None else (float(v) * invscale / scale) def parse_duration(s): if s is None: return None s = s.strip() m = re.match( r'(?i)(?:(?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$', s) if not m: return None res = int(m.group('secs')) if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext): name, real_ext = os.path.splitext(filename) return u'{0}.{1}{2}'.format(name, ext, real_ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, unicode): s = s.encode('utf-8') return compat_urllib_parse.quote(s, "%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack(u'!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = u'\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') try: etree_iter = xml.etree.ElementTree.Element.iter except AttributeError: # Python <=2.6 etree_iter = lambda n: n.findall('.//*') def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): pass # Ignore doctypes parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) # Fix up XML parser in Python 2.x if sys.version_info < (3, 0): for n in etree_iter(tree): if n.text is not None: if not isinstance(n.text, compat_str): n.text = n.text.decode('utf-8') return tree if sys.version_info < (3, 0) and sys.platform == 'win32': def compat_getpass(prompt, *args, **kwargs): if isinstance(prompt, compat_str): prompt = prompt.encode(preferredencoding()) return getpass.getpass(prompt, *args, **kwargs) else: compat_getpass = getpass.getpass US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def strip_jsonp(code): return 
re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code) def js_to_json(code): def fix_kv(m): key = m.group(2) if key.startswith("'"): assert key.endswith("'") assert '"' not in key key = '"%s"' % key[1:-1] elif not key.startswith('"'): key = '"%s"' % key value = m.group(4) if value.startswith("'"): assert value.endswith("'") assert '"' not in value value = '"%s"' % value[1:-1] return m.group(1) + key + m.group(3) + value res = re.sub(r'''(?x) ([{,]\s*) ("[^"]*"|\'[^\']*\'|[a-z0-9A-Z]+) (:\s*) ([0-9.]+|true|false|"[^"]*"|\'[^\']*\'| (?=\[|\{) ) ''', fix_kv, code) res = re.sub(r',(\s*\])', lambda m: m.group(1), res) return res def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' try: subprocess_check_output = subprocess.check_output except AttributeError: def subprocess_check_output(*args, **kwargs): assert 'input' not in kwargs p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs) output, _ = p.communicate() ret = p.poll() if ret: raise subprocess.CalledProcessError(ret, p.args, output=output) return output def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s
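# A minimal doctest-style sketch (not part of the original source) exercising
# js_to_json as defined above; the sample object literal is an assumed
# illustration. Unquoted and single-quoted keys/values come back JSON-quoted,
# while numeric literals pass through unchanged.
# >>> js_to_json("{abc: 'def', ghi: 1.0}")
# '{"abc": "def", "ghi": 1.0}'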
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-34-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-34-buggy/youtube-dl/youtube_dl/utils.py
youtube-dl-bug-29
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import base64 import calendar import codecs import contextlib import ctypes import datetime import email.utils import errno import functools import gzip import itertools import io import json import locale import math import operator import os import pipes import platform import re import ssl import socket import struct import subprocess import sys import tempfile import traceback import xml.etree.ElementTree import zlib from .compat import ( compat_basestring, compat_chr, compat_etree_fromstring, compat_html_entities, compat_http_client, compat_kwargs, compat_parse_qs, compat_socket_create_connection, compat_str, compat_urllib_error, compat_urllib_parse, compat_urllib_parse_urlparse, compat_urllib_request, compat_urlparse, shlex_quote, ) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) std_headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-us,en;q=0.5', } NO_DEFAULT = object() ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ fn = encodeFilename(fn) if sys.version_info < (3, 0) and sys.platform != 'win32': encoding = get_filesystem_encoding() # os.path.basename returns a bytes object, but NamedTemporaryFile # will fail if the filename contains non ascii characters unless we # use a unicode object path_basename = lambda f: os.path.basename(fn).decode(encoding) # the same for os.path.dirname path_dirname = lambda f: os.path.dirname(fn).decode(encoding) else: path_basename = os.path.basename path_dirname = os.path.dirname args = { 'suffix': '.tmp', 'prefix': path_basename(fn) + '.', 'dir': path_dirname(fn), 'delete': False, } # In Python 2.x, json.dump expects a bytestream. # In Python 3.x, it writes to a character stream if sys.version_info < (3, 0): args['mode'] = 'wb' else: args.update({ 'mode': 'w', 'encoding': 'utf-8', }) tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) try: with tf: json.dump(obj, tf) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. try: os.unlink(fn) except OSError: pass os.rename(tf.name, fn) except Exception: try: os.remove(tf.name) except OSError: pass raise if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z_-]+$', key) if val: assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val=None): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! 
if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') for f in node.findall(xpath): if key not in f.attrib: continue if val is None or f.attrib.get(key) == val: return f return None # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] for c in components: if len(c) == 1: replaced.append(c[0]) else: ns, tag = c replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): if sys.version_info < (2, 7): # Crazy 2.6 xpath = xpath.encode('ascii') return node.find(xpath) if isinstance(xpath, (str, compat_str)): n = _find_xpath(xpath) else: for xp in xpath: n = _find_xpath(xp) if n is not None: break if n is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element %s' % name) else: return None return n def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): n = xpath_element(node, xpath, name, fatal=fatal, default=default) if n is None or n == default: return n if n.text is None: if default is not NO_DEFAULT: return default elif fatal: name = xpath if name is None else name raise ExtractorError('Could not find XML element\'s text %s' % name) else: return None return n.text def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): n = find_xpath_attr(node, xpath, key) if n is None: if default is not NO_DEFAULT: return default elif fatal: name = '%s[@%s]' % (xpath, key) if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" m = re.search(r'''(?xs) <([a-zA-Z0-9:._-]+) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s+%s=['"]?%s['"]? (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? \s*> (?P<content>.*?) </\1> ''' % (re.escape(attribute), re.escape(value)), html) if not m: return None res = m.group('content') if res.startswith('"') or res.startswith("'"): res = res[1:-1] return unescapeHTML(res) def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html # Newline vs <br /> html = html.replace('\n', ' ') html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip() def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). 
""" try: if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if err.errno in (errno.EACCES,): raise # In case of error, try to remove win32 forbidden chars alt_filename = sanitize_path(filename) if alt_filename == filename: raise else: # An exception here should be caught in the caller stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename) def timeconvert(timestr): """Convert RFC 2822 defined time string into system timestamp""" timestamp = None timetuple = email.utils.parsedate_tz(timestr) if timetuple is not None: timestamp = email.utils.mktime_tz(timetuple) return timestamp def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible """ def replace_insane(char): if char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '_-' if restricted else ' -' elif char in '\\/|*<>': return '_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()): return '_' if restricted and ord(char) > 127: return '_' return char # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result def sanitize_path(s): """Sanitizes and normalizes path on Windows""" if sys.platform != 'win32': return s drive_or_unc, _ = os.path.splitdrive(s) if sys.version_info < (2, 7) and not drive_or_unc: drive_or_unc, _ = os.path.splitunc(s) norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] for el in iterable: if el not in res: res.append(el) return res def _htmlentity_transform(entity): """Transforms an HTML entity to a character.""" # Known non-numeric HTML entity if entity in compat_html_entities.name2codepoint: return compat_chr(compat_html_entities.name2codepoint[entity]) mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 return compat_chr(int(numstr, base)) # Unknown entity in name, return its literal representation return ('&%s;' % entity) def unescapeHTML(s): if s is None: return None assert type(s) == compat_str return re.sub( r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def get_subprocess_encoding(): if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: # For subprocess calls, 
def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs


def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg
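# Illustrative values for formatSeconds() above (assumed, for documentation
# purposes only): formatSeconds(3725) -> '1:02:05', formatSeconds(75) -> '1:15',
# formatSeconds(42) -> '42'.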
""" if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. 
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-No-Compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        return self.do_open(functools.partial(
            _create_http_connection, self, compat_http_client.HTTPConnection, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
            new_req = req_type(
                url_escaped, data=req.data, headers=req.headers,
                origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
            new_req.timeout = req.timeout
            req = new_req

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req
    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname
        return self.do_open(functools.partial(
            _create_http_connection, self, self._https_conn_class, True),
            req, **kwargs)
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        m = re.search(
            r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
            date_str)
        if not m:
            timezone = datetime.timedelta()
        else:
            date_str = date_str[:-len(m.group(0))]
            if not m.group('sign'):
                timezone = datetime.timedelta()
            else:
                sign = 1 if m.group('sign') == '+' else -1
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # %z (UTC offset) is only supported in python>=3.2
    if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
        date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    format_expressions = [
        '%d %B %Y',
        '%d %b %Y',
        '%B %d %Y',
        '%b %d %Y',
        '%b %dst %Y %I:%M%p',
        '%b %dnd %Y %I:%M%p',
        '%b %dth %Y %I:%M%p',
        '%Y %m %d',
        '%Y-%m-%d',
        '%Y/%m/%d',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%d.%m.%Y %H:%M',
        '%d.%m.%Y %H.%M',
        '%Y-%m-%dT%H:%M:%SZ',
        '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%dT%H:%M:%S.%f0Z',
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M',
    ]
    if day_first:
        format_expressions.extend([
            '%d-%m-%Y',
            '%d.%m.%Y',
            '%d/%m/%Y',
            '%d/%m/%y',
            '%d/%m/%Y %H:%M:%S',
        ])
    else:
        format_expressions.extend([
            '%m-%d-%Y',
            '%m.%d.%Y',
            '%m/%d/%Y',
            '%m/%d/%y',
            '%m/%d/%Y %H:%M:%S',
        ])
    for expression in format_expressions:
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    # Return None (not the string 'None') when nothing matched; unconditionally
    # wrapping in compat_str() here would propagate the bogus date 'None'.
    if upload_date is not None:
        return compat_str(upload_date)


def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    else:
        return default_ext


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
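# Illustrative date parsing for the helpers above (sketch values, not an
# exhaustive spec):
#   parse_iso8601('1970-01-01T00:00:10Z')  -> 10
#   unified_strdate('December 21, 2010')   -> '20101221'
#   unified_strdate('not a date')          -> None (not the string 'None')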
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, "%Y%m%d").date()


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s", the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res
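# Illustrative date_from_str() inputs (assumed): 'now' and 'today' give the
# current day, 'now-1week' gives today minus 7 days, and '20141026' is parsed
# as the literal date 2014-10-26.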
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b"GetStdHandle", ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b"GetConsoleMode", ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    import fcntl

    def _lock_file(f, exclusive):
        fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

    def _unlock_file(f):
        fcntl.flock(f, fcntl.LOCK_UN)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    sdata = compat_urllib_parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data


def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1, 'b': 1,
        'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000,
        'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2,
        'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3,
        'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4,
        'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5,
        'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6,
        'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7,
        'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8,
    }

    units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
    if not m:
        return None

    num_str = m.group('num').replace(',', '.')
    mult = _UNIT_TABLE[m.group('unit')]
    return int(float(num_str) * mult)
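# Illustrative conversions (assumed values):
#   format_bytes(1024)      -> '1.00KiB'
#   format_bytes(0)         -> '0.00B'
#   parse_filesize('5 MiB') -> 5242880
#   parse_filesize('1,5Kb') -> 1500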
def month_by_name(name):
    """ Return the number of a month by (locale-independently) English name """

    try:
        return ENGLISH_MONTH_NAMES.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, compat_str)
    try:
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
    except OSError:
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    if s.startswith(start):
        return s[len(start):]
    return s


def remove_end(s, end):
    if s.endswith(end):
        return s[:-len(end)]
    return s


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return "HEAD"


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except ValueError:
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except ValueError:
        return default
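# Illustrative coercions (assumed values): str_to_int('123,456') -> 123456,
# int_or_none('42') -> 42, int_or_none(None, default=0) -> 0, and
# float_or_none('2.5', invscale=2) -> 5.0.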
def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    m = re.match(
        r'''(?ix)(?:P?T)?
        (?:
            (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|

            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
            (?:
                (?:
                    (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
                    (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*
                )?
                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
            )?
            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
        )$''', s)
    if not m:
        return None
    res = 0
    if m.group('only_mins'):
        return float_or_none(m.group('only_mins'), invscale=60)
    if m.group('only_hours'):
        return float_or_none(m.group('only_hours'), invscale=60 * 60)
    if m.group('secs'):
        res += int(m.group('secs'))
    if m.group('mins_reversed'):
        res += int(m.group('mins_reversed')) * 60
    if m.group('mins'):
        res += int(m.group('mins')) * 60
    if m.group('hours'):
        res += int(m.group('hours')) * 60 * 60
    if m.group('hours_reversed'):
        res += int(m.group('hours_reversed')) * 60 * 60
    if m.group('days'):
        res += int(m.group('days')) * 24 * 60 * 60
    if m.group('ms'):
        res += float(m.group('ms'))
    return res


def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


class PagedList(object):
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
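# Illustrative inputs for parse_duration() and detect_exe_version() above
# (assumed values, shown as a sketch):
#   parse_duration('3:11')      -> 191
#   parse_duration('1h30m15s')  -> 5415
#   detect_exe_version('ffmpeg version 2.4.4 Copyright (c) ...') -> '2.4.4'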
class OnDemandPagedList(PagedList):
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = list(self._pagefunc(pagenum))

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()

try:
    struct.pack('!I', 0)
except TypeError:
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
else:
    struct_pack = struct.pack
    struct_unpack = struct.unpack


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')


def encode_dict(d, encoding='utf-8'):
    return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


def parse_age_limit(s):
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)


def strip_jsonp(code):
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
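# Illustrative uses (assumed values): strip_jsonp('cb({"a": 1});') -> '{"a": 1}',
# parse_age_limit('18+') -> 18, parse_age_limit('PG-13') -> 13 (via US_RATINGS).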
def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)
        return '"%s"' % v

    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
        ''', fix_kv, code)
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
    return res


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(shlex_quote(a) for a in args)


def mimetype2ext(mt):
    _, _, res = mt.rpartition('/')

    return {
        'x-ms-wmv': 'wmv',
        'x-mp4-fragmented': 'mp4',
        'ttml+xml': 'ttml',
    }.get(res, res)


def urlhandle_detect_ext(url_handle):
    try:
        url_handle.headers
        getheader = lambda h: url_handle.headers[h]
    except AttributeError:  # Python < 3
        getheader = url_handle.info().getheader

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit
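# Illustrative inputs (assumed): js_to_json("{x: 1, y: 'a', z: [1, 2,]}")
# -> '{"x": 1, "y": "a", "z": [1, 2]}', and mimetype2ext('video/x-ms-wmv')
# -> 'wmv'.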
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
def match_filter_func(filter_str):
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return 0.0

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)


def dfxp2srt(dfxp_data):
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
    })

    def parse_node(node):
        str_or_empty = functools.partial(str_or_none, default='')

        out = str_or_empty(node.text)

        for child in node:
            if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                out += '\n' + str_or_empty(child.tail)
            elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
                out += str_or_empty(parse_node(child))
            else:
                out += str_or_empty(xml.etree.ElementTree.tostring(child))

        return out

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib['begin'])
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        if not end_time:
            end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)


def cli_option(params, command_option, param):
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(params, param, default=[]):
    ex_args = params.get(param)
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args
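# Illustrative timing conversions (assumed values):
#   parse_dfxp_time_expr('00:00:01.100') -> 1.1
#   parse_dfxp_time_expr('1.5s')         -> 1.5
#   srt_subtitles_timecode(3725.5)       -> '01:02:05,500'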
class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara',
        'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih',
        'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che',
        'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan',
        'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa',
        'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra',
        'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau',
        'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her',
        'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl',
        'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua',
        'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur',
        'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin',
        'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd',
        'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob',
        'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav',
        'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli',
        'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus',
        'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk',
        'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot',
        'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir',
        'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi',
        'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol',
        'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
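# Illustrative lookups (assumed): ISO639Utils.short2long('de') -> 'deu',
# ISO639Utils.long2short('eng') -> 'en'.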
class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria',
        'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla',
        'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia',
        'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan',
        'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados',
        'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin',
        'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria',
        'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon',
        'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands',
        'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China',
        'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia',
        'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia',
        'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic',
        'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic',
        'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji',
        'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia',
        'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia',
        'GE': 'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar',
        'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe',
        'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea',
        'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland',
        'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel',
        'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey',
        'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of',
        'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia',
        'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg',
        'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives',
        'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique',
        'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of',
        'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat',
        'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia',
        'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia',
        'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria',
        'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands',
        'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau',
        'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea',
        'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn',
        'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar',
        'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda',
        'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino',
        'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal',
        'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia',
        'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan',
        'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden',
        'CH': 'Switzerland', 'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste',
        'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda',
        'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom',
        'US': 'United States', 'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna', 'EH': 'Western Sahara', 'YE': 'Yemen',
        'ZM': 'Zambia', 'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
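# Illustrative (hypothetical) per-request proxy selection: with an opener built
# on PerRequestProxyHandler, a request is routed through the proxy named in its
# 'Ytdl-request-proxy' header, e.g.
#   req = compat_urllib_request.Request('http://example.com/')
#   req.add_header('Ytdl-request-proxy', 'http://127.0.0.1:8118')
# the header is removed again before the request is actually sent.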
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: return s return s.encode(get_subprocess_encoding(), 'ignore') def decodeFilename(b, for_subprocess=False): if sys.version_info >= (3, 0): return b if not isinstance(b, bytes): return b return b.decode(get_subprocess_encoding(), 'ignore') def encodeArgument(s): if not isinstance(s, compat_str): # Legacy code that uses byte strings # Uncomment the following line after fixing all post processors # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) s = s.decode('ascii') return encodeFilename(s, True) def decodeArgument(b): return decodeFilename(b, True) def decodeOption(optval): if optval is None: return optval if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) assert isinstance(optval, compat_str) return optval def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) elif secs > 60: return '%d:%02d' % (secs // 60, secs % 60) else: return '%d' % secs def make_HTTPS_handler(params, **kwargs): opts_no_check_certificate = params.get('nocheckcertificate', False) if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if opts_no_check_certificate: context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return YoutubeDLHTTPSHandler(params, context=context, **kwargs) except TypeError: # Python 2.7.8 # (create_default_context present but HTTPSHandler has no context=) pass if sys.version_info < (3, 2): return YoutubeDLHTTPSHandler(params, **kwargs) else: # Python < 3.4 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) context.set_default_verify_paths() return YoutubeDLHTTPSHandler(params, context=context, **kwargs) def bug_reports_message(): if ytdl_is_updateable(): update_cmd = 'type youtube-dl -U to update' else: update_cmd = 'see https://yt-dl.org/update on how to update' msg = '; please report this issue on https://yt-dl.org/bug .' msg += ' Make sure you are using the latest version; %s.' % update_cmd msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' return msg class ExtractorError(Exception): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: msg = video_id + ': ' + msg if cause: msg += ' (caused by %r)' % cause if not expected: msg += bug_reports_message() super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception self.cause = cause self.video_id = video_id def format_traceback(self): if self.traceback is None: return None return ''.join(traceback.format_tb(self.traceback)) class UnsupportedError(ExtractorError): def __init__(self, url): super(UnsupportedError, self).__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url class RegexNotFoundError(ExtractorError): """Error when a regex didn't match""" pass class DownloadError(Exception): """Download Error exception. 
This exception may be thrown by FileDownloader objects if they are not configured to continue on errors. They will contain the appropriate error message. """ def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) self.exc_info = exc_info class SameFileError(Exception): """Same File exception. This exception will be thrown by FileDownloader objects if they detect multiple files would have to be downloaded to the same file on disk. """ pass class PostProcessingError(Exception): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ def __init__(self, msg): self.msg = msg class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass class UnavailableVideoError(Exception): """Unavailable Format exception. This exception will be thrown when a video is requested in a format that is not available for that video. """ pass class ContentTooShortError(Exception): """Content Too Short exception. This exception may be raised by FileDownloader objects when a file they download is too small for what the server announced first, indicating the connection was probably interrupted. """ def __init__(self, downloaded, expected): # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting # expected HTTP responses to meet HTTP/1.0 or later (see also # https://github.com/rg3/youtube-dl/issues/6727) if sys.version_info < (3, 0): kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: sa = (source_address, 0) if hasattr(hc, 'source_address'): # Python 2.7+ hc.source_address = sa else: # Python 2.6 def _hc_connect(self, *args, **kwargs): sock = compat_socket_create_connection( (self.host, self.port), self.timeout, sa) if is_https: self.sock = ssl.wrap_socket( sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) else: self.sock = sock hc.connect = functools.partial(_hc_connect, hc) return hc class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has to include the HTTP header "Youtubedl-No-Compression", which will be removed before making the real request. Part of this code was copied from: http://techknack.net/python-urllib2-handlers/ Andrew Rowls, the author of that code, agreed to release it to the public domain. 
""" def __init__(self, params, *args, **kwargs): compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): return self.do_open(functools.partial( _create_http_connection, self, compat_http_client.HTTPConnection, False), req) @staticmethod def deflate(data): try: return zlib.decompress(data, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(data) @staticmethod def addinfourl_wrapper(stream, headers, url, code): if hasattr(compat_urllib_request.addinfourl, 'getcode'): return compat_urllib_request.addinfourl(stream, headers, url, code) ret = compat_urllib_request.addinfourl(stream, headers, url) ret.code = code return ret def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # always respected by websites, some tend to give out URLs with non percent-encoded # non-ASCII characters (see telemb.py, ard.py [#3412]) # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) # To work around aforementioned issue we will replace request's original URL with # percent-encoded one # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) # the code of this workaround has been moved here from YoutubeDL.urlopen() url = req.get_full_url() url_escaped = escape_url(url) # Substitute URL if any change after escaping if url != url_escaped: req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request new_req = req_type( url_escaped, data=req.data, headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) new_req.timeout = req.timeout req = new_req for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) if 'Youtubedl-no-compression' in req.headers: if 'Accept-encoding' in req.headers: del req.headers['Accept-encoding'] del req.headers['Youtubedl-no-compression'] if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments req._Request__original = req._Request__original.partition('#')[0] req._Request__r_type = req._Request__r_type.partition('#')[0] return req def http_response(self, req, resp): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': content = resp.read() gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) except IOError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) except IOError: continue break else: raise original_ioerror resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see # https://github.com/rg3/youtube-dl/issues/6457). 
if 300 <= resp.code < 400: location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 if sys.version_info >= (3, 0): location = location.encode('iso-8859-1').decode('utf-8') location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] resp.headers['Location'] = location_escaped return resp https_request = http_request https_response = http_response class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection self._params = params def https_open(self, req): kwargs = {} if hasattr(self, '_context'): # python > 2.6 kwargs['context'] = self._context if hasattr(self, '_check_hostname'): # python 3.x kwargs['check_hostname'] = self._check_hostname return self.do_open(functools.partial( _create_http_connection, self, self._https_conn_class, True), req, **kwargs) class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): def __init__(self, cookiejar=None): compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): # Python 2 will choke on next HTTP request in row if there are non-ASCII # characters in Set-Cookie HTTP header of last response (see # https://github.com/rg3/youtube-dl/issues/6769). # In order to at least prevent crashing we will percent encode Set-Cookie # header before HTTPCookieProcessor starts processing it. # if sys.version_info < (3, 0) and response.headers: # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): # set_cookie = response.headers.get(set_cookie_header) # if set_cookie: # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") # if set_cookie != set_cookie_escaped: # del response.headers[set_cookie_header] # response.headers[set_cookie_header] = set_cookie_escaped return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) https_request = compat_urllib_request.HTTPCookieProcessor.http_request https_response = http_response def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: m = re.search( r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() else: date_str = date_str[:-len(m.group(0))] if not m.group('sign'): timezone = datetime.timedelta() else: sign = 1 if m.group('sign') == '+' else -1 timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) try: date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) except ValueError: pass def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # %z (UTC offset) is only supported in python>=3.2 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str): date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str) # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) format_expressions = [ '%d %B %Y', '%d 
%b %Y', '%B %d %Y', '%b %d %Y', '%b %dst %Y %I:%M%p', '%b %dnd %Y %I:%M%p', '%b %dth %Y %I:%M%p', '%Y %m %d', '%Y-%m-%d', '%Y/%m/%d', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%d.%m.%Y %H:%M', '%d.%m.%Y %H.%M', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f0Z', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M', ] if day_first: format_expressions.extend([ '%d-%m-%Y', '%d.%m.%Y', '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', ]) else: format_expressions.extend([ '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y', '%m/%d/%y', '%m/%d/%Y %H:%M:%S', ]) for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') except ValueError: pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') if upload_date is not None: return compat_str(upload_date) def determine_ext(url, default_ext='unknown_video'): if url is None: return default_ext guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess else: return default_ext def subtitles_filename(filename, sub_lang, sub_format): return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?""" today = datetime.date.today() if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str) if match is not None: sign = match.group('sign') time = int(match.group('time')) if sign == '-': time = -time unit = match.group('unit') # A bad approximation?
if unit == 'month': unit = 'day' time *= 30 elif unit == 'year': unit = 'day' time *= 365 unit += 's' delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str) if match is not None: return '-'.join(match.groups()) else: return date_str class DateRange(object): """Represents a time interval between two dates""" def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: self.start = date_from_str(start) else: self.start = datetime.datetime.min.date() if end is not None: self.end = date_from_str(end) else: self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) @classmethod def day(cls, day): """Returns a range that only contains the given day""" return cls(day, day) def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end def __str__(self): return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): """ Returns the platform name as a compat_str """ res = platform.platform() if isinstance(res, bytes): res = res.decode(preferredencoding()) assert isinstance(res, compat_str) return res def _windows_write_string(s, out): """ Returns True if the string was written using special methods, False if it has yet to be written out.""" # Adapted from http://stackoverflow.com/a/3259271/35070 import ctypes import ctypes.wintypes WIN_OUTPUT_IDS = { 1: -11, 2: -12, } try: fileno = out.fileno() except AttributeError: # If the output stream doesn't have a fileno, it's virtual return False except io.UnsupportedOperation: # Some strange Windows pseudo files? 
return False if fileno not in WIN_OUTPUT_IDS: return False GetStdHandle = ctypes.WINFUNCTYPE( ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( (b"GetStdHandle", ctypes.windll.kernel32)) h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) WriteConsoleW = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32)) written = ctypes.wintypes.DWORD(0) GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32)) FILE_TYPE_CHAR = 0x0002 FILE_TYPE_REMOTE = 0x8000 GetConsoleMode = ctypes.WINFUNCTYPE( ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.POINTER(ctypes.wintypes.DWORD))( (b"GetConsoleMode", ctypes.windll.kernel32)) INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value def not_a_console(handle): if handle == INVALID_HANDLE_VALUE or handle is None: return True return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) if not_a_console(h): return False def next_nonbmp_pos(s): try: return next(i for i, c in enumerate(s) if ord(c) > 0xffff) except StopIteration: return len(s) while s: count = min(next_nonbmp_pos(s), 1024) ret = WriteConsoleW( h, s, count if count else 2, ctypes.byref(written), None) if ret == 0: raise OSError('Failed to write string') if not count: # We just wrote a non-BMP character assert written.value == 2 s = s[1:] else: assert written.value > 0 s = s[written.value:] return True def write_string(s, out=None, encoding=None): if out is None: out = sys.stderr assert type(s) == compat_str if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): if _windows_write_string(s, out): return if ('b' in getattr(out, 'mode', '') or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr byt = s.encode(encoding or preferredencoding(), 'ignore') out.write(byt) elif hasattr(out, 'buffer'): enc = encoding or getattr(out, 'encoding', None) or preferredencoding() byt = s.encode(enc, 'ignore') out.buffer.write(byt) else: out.write(s) out.flush() def bytes_to_intlist(bs): if not bs: return [] if isinstance(bs[0], int): # Python 3 return list(bs) else: return [ord(c) for c in bs] def intlist_to_bytes(xs): if not xs: return b'' return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking if sys.platform == 'win32': import ctypes.wintypes import msvcrt class OVERLAPPED(ctypes.Structure): _fields_ = [ ('Internal', ctypes.wintypes.LPVOID), ('InternalHigh', ctypes.wintypes.LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('hEvent', ctypes.wintypes.HANDLE), ] kernel32 = ctypes.windll.kernel32 LockFileEx = kernel32.LockFileEx LockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwFlags ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] LockFileEx.restype = ctypes.wintypes.BOOL UnlockFileEx = kernel32.UnlockFileEx UnlockFileEx.argtypes = [ ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.DWORD, # dwReserved ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh ctypes.POINTER(OVERLAPPED) # Overlapped ] UnlockFileEx.restype = ctypes.wintypes.BOOL whole_low = 0xffffffff whole_high = 0x7fffffff def _lock_file(f, exclusive): overlapped = OVERLAPPED() 
overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.hEvent = 0 f._lock_file_overlapped_p = ctypes.pointer(overlapped) handle = msvcrt.get_osfhandle(f.fileno()) if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Locking file failed: %r' % ctypes.FormatError()) def _unlock_file(f): assert f._lock_file_overlapped_p handle = msvcrt.get_osfhandle(f.fileno()) if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p): raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) else: import fcntl def _lock_file(f, exclusive): fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): def __init__(self, filename, mode, encoding=None): assert mode in ['r', 'a', 'w'] self.f = io.open(filename, mode, encoding=encoding) self.mode = mode def __enter__(self): exclusive = self.mode != 'r' try: _lock_file(self.f, exclusive) except IOError: self.f.close() raise return self def __exit__(self, etype, value, traceback): try: _unlock_file(self.f) finally: self.f.close() def __iter__(self): return iter(self.f) def write(self, *args): return self.f.write(*args) def read(self, *args): return self.f.read(*args) def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' def shell_quote(args): quoted_args = [] encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) return ' '.join(quoted_args) def smuggle_url(url, data): """ Pass additional data in a URL for internal use. """ sdata = compat_urllib_parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) return '%.2f%s' % (converted, suffix) def parse_filesize(s): if s is None: return None # The lower-case forms are of course incorrect and unofficial, # but we support those too _UNIT_TABLE = { 'B': 1, 'b': 1, 'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, } units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) m = re.match( r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s) if not m: return None num_str = m.group('num').replace(',', '.') mult = _UNIT_TABLE[m.group('unit')] return int(float(num_str) * mult) def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ try: return
ENGLISH_MONTH_NAMES.index(name) + 1 except ValueError: return None def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None def fix_xml_ampersands(xml_str): """Replace all the '&' by '&amp;' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', '&amp;', xml_str) def setproctitle(title): assert isinstance(title, compat_str) try: libc = ctypes.cdll.LoadLibrary("libc.so.6") except OSError: return title_bytes = title.encode('utf-8') buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: libc.prctl(15, buf, 0, 0, 0) except AttributeError: return # Strange libc, just skip this def remove_start(s, start): if s.startswith(start): return s[len(start):] return s def remove_end(s, end): if s.endswith(end): return s[:-len(end)] return s def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): def get_method(self): return "HEAD" def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = getattr(v, get_attr, None) if v == '': v = None if v is None: return default try: return int(v) * invscale // scale except ValueError: return default def str_or_none(v, default=None): return default if v is None else compat_str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ if int_str is None: return None int_str = re.sub(r'[,\.\+]', '', int_str) return int(int_str) def float_or_none(v, scale=1, invscale=1, default=None): if v is None: return default try: return float(v) * invscale / scale except ValueError: return default def parse_duration(s): if not isinstance(s, compat_basestring): return None s = s.strip() m = re.match( r'''(?ix)(?:P?T)? (?: (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*| (?P<only_hours>[0-9.]+)\s*(?:hours?)| \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*| (?: (?: (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)? (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s* )? (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* )? (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)? 
)$''', s) if not m: return None res = 0 if m.group('only_mins'): return float_or_none(m.group('only_mins'), invscale=60) if m.group('only_hours'): return float_or_none(m.group('only_hours'), invscale=60 * 60) if m.group('secs'): res += int(m.group('secs')) if m.group('mins_reversed'): res += int(m.group('mins_reversed')) * 60 if m.group('mins'): res += int(m.group('mins')) * 60 if m.group('hours'): res += int(m.group('hours')) * 60 * 60 if m.group('hours_reversed'): res += int(m.group('hours_reversed')) * 60 * 60 if m.group('days'): res += int(m.group('days')) * 24 * 60 * 60 if m.group('ms'): res += float(m.group('ms')) return res def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( '{0}.{1}{2}'.format(name, ext, real_ext) if not expected_real_ext or real_ext[1:] == expected_real_ext else '{0}.{1}'.format(filename, ext)) def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return '{0}.{1}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() except OSError: return False return exe def get_exe_version(exe, args=['--version'], version_re=None, unrecognized='present'): """ Returns the version of the specified executable, or False if the executable is not present """ try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False if isinstance(out, bytes): # Python 2.x out = out.decode('ascii', 'ignore') return detect_exe_version(out, version_re, unrecognized) def detect_exe_version(output, version_re=None, unrecognized='present'): assert isinstance(output, compat_str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) if m: return m.group(1) else: return unrecognized class PagedList(object): def __len__(self): # This is only useful for tests return len(self.getslice()) class OnDemandPagedList(PagedList): def __init__(self, pagefunc, pagesize): self._pagefunc = pagefunc self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize nextfirstid = pagenum * self._pagesize + self._pagesize if start >= nextfirstid: continue page_results = list(self._pagefunc(pagenum)) startv = ( start % self._pagesize if firstid <= start < nextfirstid else 0) endv = ( ((end - 1) % self._pagesize) + 1 if (end is not None and firstid <= end <= nextfirstid) else None) if startv != 0 or endv is not None: page_results = page_results[startv:endv] res.extend(page_results) # A little optimization - if current page is not "full", ie. does # not contain page_size videos then we can assume that this page # is the last one - there are no more ids on further pages - # i.e. no need to query again. 
if len(page_results) + startv < self._pagesize: break # If we got the whole page, but the next page is not interesting, # break out early as well if end == nextfirstid: break return res class InAdvancePagedList(PagedList): def __init__(self, pagefunc, pagecount, pagesize): self._pagefunc = pagefunc self._pagecount = pagecount self._pagesize = pagesize def getslice(self, start=0, end=None): res = [] start_page = start // self._pagesize end_page = ( self._pagecount if end is None else (end // self._pagesize + 1)) skip_elems = start - start_page * self._pagesize only_more = None if end is None else end - start for pagenum in range(start_page, end_page): page = list(self._pagefunc(pagenum)) if skip_elems: page = page[skip_elems:] skip_elems = None if only_more is not None: if len(page) < only_more: only_more -= len(page) else: page = page[:only_more] res.extend(page) break res.extend(page) return res def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\U[0-9a-fA-F]{8}', lambda m: unicode_escape(m.group(0))[0], s) def lowercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" url_parsed = compat_urllib_parse_urlparse(url) return url_parsed._replace( path=escape_rfc3986(url_parsed.path), params=escape_rfc3986(url_parsed.params), query=escape_rfc3986(url_parsed.query), fragment=escape_rfc3986(url_parsed.fragment) ).geturl() try: struct.pack('!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: struct_pack = struct.pack struct_unpack = struct.unpack def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() if url.startswith(('#', ';', ']')): return False return url with contextlib.closing(batch_fd) as fd: return [url for url in map(fixup, fd) if url] def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') def encode_dict(d, encoding='utf-8'): return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items()) US_RATINGS = { 'G': 0, 'PG': 10, 'PG-13': 13, 'R': 16, 'NC': 18, } def parse_age_limit(s): if s is None: return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) return int(m.group('age')) if m else US_RATINGS.get(s, None) def strip_jsonp(code): return re.sub( r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) def js_to_json(code): def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): return v if v.startswith('"'): v = re.sub(r"\\'", "'", v[1:-1]) elif v.startswith("'"): v = v[1:-1] v = re.sub(r"\\\\|\\'|\"", lambda m: { '\\\\': '\\\\', "\\'": "'", '"': '\\"', }[m.group(0)], v) return '"%s"' % v res = re.sub(r'''(?x) "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"| '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'| [a-zA-Z_][.a-zA-Z_0-9]* ''', fix_kv, 
code) res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res) return res def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s def version_tuple(v): return tuple(int(e) for e in re.split(r'[-.]', v)) def is_outdated_version(version, limit, assume_new=True): if not version: return not assume_new try: return version_tuple(version) < version_tuple(limit) except ValueError: return not assume_new def ytdl_is_updateable(): """ Returns if youtube-dl can be updated with -U """ from zipimport import zipimporter return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') def args_to_str(args): # Get a short string representation for a subprocess command return ' '.join(shlex_quote(a) for a in args) def mimetype2ext(mt): _, _, res = mt.rpartition('/') return { 'x-ms-wmv': 'wmv', 'x-mp4-fragmented': 'mp4', 'ttml+xml': 'ttml', }.get(res, res) def urlhandle_detect_ext(url_handle): try: url_handle.headers getheader = lambda h: url_handle.headers[h] except AttributeError: # Python < 3 getheader = url_handle.info().getheader cd = getheader('Content-Disposition') if cd: m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd) if m: e = determine_ext(m.group('filename'), default_ext=None) if e: return e return mimetype2ext(getheader('Content-Type')) def encode_data_uri(data, mime_type): return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. 
""" BOMS = [ (b'\xef\xbb\xbf', 'utf-8'), (b'\x00\x00\xfe\xff', 'utf-32-be'), (b'\xff\xfe\x00\x00', 'utf-32-le'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be'), ] for bom, enc in BOMS: if first_bytes.startswith(bom): s = first_bytes[len(bom):].decode(enc, 'replace') break else: s = first_bytes.decode('utf-8', 'replace') return re.match(r'^\s*<', s) def determine_protocol(info_dict): protocol = info_dict.get('protocol') if protocol is not None: return protocol url = info_dict['url'] if url.startswith('rtmp'): return 'rtmp' elif url.startswith('mms'): return 'mms' elif url.startswith('rtsp'): return 'rtsp' ext = determine_ext(url) if ext == 'm3u8': return 'm3u8' elif ext == 'f4m': return 'f4m' return compat_urllib_parse_urlparse(url).scheme def render_table(header_row, data): """ Render a list of rows, each as a list of values """ table = [header_row] + data max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)] format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s' return '\n'.join(format_str % tuple(row) for row in table) def _match_one(filter_part, dct): COMPARISON_OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>[a-z_]+) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = COMPARISON_OPERATORS[m.group('op')] if m.group('strval') is not None: if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' % m.group('op')) comparison_value = m.group('strval') else: try: comparison_value = int(m.group('intval')) except ValueError: comparison_value = parse_filesize(m.group('intval')) if comparison_value is None: comparison_value = parse_filesize(m.group('intval') + 'B') if comparison_value is None: raise ValueError( 'Invalid integer value %r in filter part %r' % ( m.group('intval'), filter_part)) actual_value = dct.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) UNARY_OPERATORS = { '': lambda v: v is not None, '!': lambda v: v is None, } operator_rex = re.compile(r'''(?x)\s* (?P<op>%s)\s*(?P<key>[a-z_]+) \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) m = operator_rex.search(filter_part) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) return op(actual_value) raise ValueError('Invalid filter part %r' % filter_part) def match_str(filter_str, dct): """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """ return all( _match_one(filter_part, dct) for filter_part in filter_str.split('&')) def match_filter_func(filter_str): def _match_func(info_dict): if match_str(filter_str, info_dict): return None else: video_title = info_dict.get('title', info_dict.get('id', 'video')) return '%s does not pass filter %s, skipping ..' 
% (video_title, filter_str) return _match_func def parse_dfxp_time_expr(time_expr): if not time_expr: return 0.0 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr) if mobj: return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3)) def srt_subtitles_timecode(seconds): return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000) def dfxp2srt(dfxp_data): _x = functools.partial(xpath_with_ns, ns_map={ 'ttml': 'http://www.w3.org/ns/ttml', 'ttaf1': 'http://www.w3.org/2006/10/ttaf1', }) def parse_node(node): str_or_empty = functools.partial(str_or_none, default='') out = str_or_empty(node.text) for child in node: if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'): out += '\n' + str_or_empty(child.tail) elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'): out += str_or_empty(parse_node(child)) else: out += str_or_empty(xml.etree.ElementTree.tostring(child)) return out dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib['begin']) end_time = parse_dfxp_time_expr(para.attrib.get('end')) if not end_time: end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur']) out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out) def cli_option(params, command_option, param): param = params.get(param) return [command_option, param] if param is not None else [] def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) assert isinstance(param, bool) if separator: return [command_option + separator + (true_value if param else false_value)] return [command_option, true_value if param else false_value] def cli_valueless_option(params, command_option, param, expected_value=True): param = params.get(param) return [command_option] if param == expected_value else [] def cli_configuration_args(params, param, default=[]): ex_args = params.get(param) if ex_args is None: return default assert isinstance(ex_args, list) return ex_args class ISO639Utils(object): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 
'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } @classmethod def short2long(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) @classmethod def long2short(cls, code): """Convert language code from ISO 639-2/T to ISO 639-1""" for short_name, long_name in cls._lang_map.items(): if long_name == code: return short_name class ISO3166Utils(object): # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', 'AX': 'Åland Islands', 'AL': 'Albania', 'DZ': 'Algeria', 'AS': 'American Samoa', 'AD': 'Andorra', 'AO': 'Angola', 'AI': 'Anguilla', 'AQ': 'Antarctica', 'AG': 'Antigua and Barbuda', 'AR': 'Argentina', 'AM': 'Armenia', 'AW': 'Aruba', 'AU': 'Australia', 'AT': 'Austria', 'AZ': 'Azerbaijan', 'BS': 'Bahamas', 'BH': 'Bahrain', 'BD': 'Bangladesh', 'BB': 'Barbados', 'BY': 'Belarus', 'BE': 'Belgium', 'BZ': 'Belize', 'BJ': 'Benin', 'BM': 'Bermuda', 'BT': 'Bhutan', 'BO': 'Bolivia, Plurinational State of', 'BQ': 'Bonaire, Sint Eustatius and Saba', 'BA': 'Bosnia and Herzegovina', 'BW': 'Botswana', 'BV': 'Bouvet Island', 'BR': 'Brazil', 'IO': 'British Indian Ocean Territory', 'BN': 'Brunei Darussalam', 'BG': 'Bulgaria', 'BF': 'Burkina Faso', 'BI': 'Burundi', 'KH': 'Cambodia', 'CM': 'Cameroon', 'CA': 'Canada', 'CV': 'Cape Verde', 'KY': 'Cayman Islands', 'CF': 'Central African Republic', 'TD': 'Chad', 'CL': 'Chile', 'CN': 'China', 'CX': 'Christmas Island', 'CC': 'Cocos (Keeling) Islands', 'CO': 'Colombia', 'KM': 'Comoros', 'CG': 'Congo', 'CD': 'Congo, the Democratic Republic of the', 'CK': 'Cook Islands', 'CR': 'Costa Rica', 'CI': 'Côte d\'Ivoire', 'HR': 'Croatia', 'CU': 'Cuba', 'CW': 'Curaçao', 'CY': 'Cyprus', 'CZ': 'Czech Republic', 'DK': 'Denmark', 'DJ': 'Djibouti', 'DM': 'Dominica', 'DO': 'Dominican Republic', 'EC': 'Ecuador', 'EG': 'Egypt', 'SV': 'El Salvador', 'GQ': 'Equatorial Guinea', 'ER': 'Eritrea', 'EE': 'Estonia', 'ET': 'Ethiopia', 'FK': 'Falkland Islands (Malvinas)', 'FO': 'Faroe Islands', 'FJ': 'Fiji', 'FI': 'Finland', 'FR': 'France', 'GF': 'French Guiana', 'PF': 'French Polynesia', 'TF': 'French Southern Territories', 'GA': 'Gabon', 'GM': 'Gambia', 'GE': 
'Georgia', 'DE': 'Germany', 'GH': 'Ghana', 'GI': 'Gibraltar', 'GR': 'Greece', 'GL': 'Greenland', 'GD': 'Grenada', 'GP': 'Guadeloupe', 'GU': 'Guam', 'GT': 'Guatemala', 'GG': 'Guernsey', 'GN': 'Guinea', 'GW': 'Guinea-Bissau', 'GY': 'Guyana', 'HT': 'Haiti', 'HM': 'Heard Island and McDonald Islands', 'VA': 'Holy See (Vatican City State)', 'HN': 'Honduras', 'HK': 'Hong Kong', 'HU': 'Hungary', 'IS': 'Iceland', 'IN': 'India', 'ID': 'Indonesia', 'IR': 'Iran, Islamic Republic of', 'IQ': 'Iraq', 'IE': 'Ireland', 'IM': 'Isle of Man', 'IL': 'Israel', 'IT': 'Italy', 'JM': 'Jamaica', 'JP': 'Japan', 'JE': 'Jersey', 'JO': 'Jordan', 'KZ': 'Kazakhstan', 'KE': 'Kenya', 'KI': 'Kiribati', 'KP': 'Korea, Democratic People\'s Republic of', 'KR': 'Korea, Republic of', 'KW': 'Kuwait', 'KG': 'Kyrgyzstan', 'LA': 'Lao People\'s Democratic Republic', 'LV': 'Latvia', 'LB': 'Lebanon', 'LS': 'Lesotho', 'LR': 'Liberia', 'LY': 'Libya', 'LI': 'Liechtenstein', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'MO': 'Macao', 'MK': 'Macedonia, the Former Yugoslav Republic of', 'MG': 'Madagascar', 'MW': 'Malawi', 'MY': 'Malaysia', 'MV': 'Maldives', 'ML': 'Mali', 'MT': 'Malta', 'MH': 'Marshall Islands', 'MQ': 'Martinique', 'MR': 'Mauritania', 'MU': 'Mauritius', 'YT': 'Mayotte', 'MX': 'Mexico', 'FM': 'Micronesia, Federated States of', 'MD': 'Moldova, Republic of', 'MC': 'Monaco', 'MN': 'Mongolia', 'ME': 'Montenegro', 'MS': 'Montserrat', 'MA': 'Morocco', 'MZ': 'Mozambique', 'MM': 'Myanmar', 'NA': 'Namibia', 'NR': 'Nauru', 'NP': 'Nepal', 'NL': 'Netherlands', 'NC': 'New Caledonia', 'NZ': 'New Zealand', 'NI': 'Nicaragua', 'NE': 'Niger', 'NG': 'Nigeria', 'NU': 'Niue', 'NF': 'Norfolk Island', 'MP': 'Northern Mariana Islands', 'NO': 'Norway', 'OM': 'Oman', 'PK': 'Pakistan', 'PW': 'Palau', 'PS': 'Palestine, State of', 'PA': 'Panama', 'PG': 'Papua New Guinea', 'PY': 'Paraguay', 'PE': 'Peru', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PT': 'Portugal', 'PR': 'Puerto Rico', 'QA': 'Qatar', 'RE': 'Réunion', 'RO': 'Romania', 'RU': 'Russian Federation', 'RW': 'Rwanda', 'BL': 'Saint Barthélemy', 'SH': 'Saint Helena, Ascension and Tristan da Cunha', 'KN': 'Saint Kitts and Nevis', 'LC': 'Saint Lucia', 'MF': 'Saint Martin (French part)', 'PM': 'Saint Pierre and Miquelon', 'VC': 'Saint Vincent and the Grenadines', 'WS': 'Samoa', 'SM': 'San Marino', 'ST': 'Sao Tome and Principe', 'SA': 'Saudi Arabia', 'SN': 'Senegal', 'RS': 'Serbia', 'SC': 'Seychelles', 'SL': 'Sierra Leone', 'SG': 'Singapore', 'SX': 'Sint Maarten (Dutch part)', 'SK': 'Slovakia', 'SI': 'Slovenia', 'SB': 'Solomon Islands', 'SO': 'Somalia', 'ZA': 'South Africa', 'GS': 'South Georgia and the South Sandwich Islands', 'SS': 'South Sudan', 'ES': 'Spain', 'LK': 'Sri Lanka', 'SD': 'Sudan', 'SR': 'Suriname', 'SJ': 'Svalbard and Jan Mayen', 'SZ': 'Swaziland', 'SE': 'Sweden', 'CH': 'Switzerland', 'SY': 'Syrian Arab Republic', 'TW': 'Taiwan, Province of China', 'TJ': 'Tajikistan', 'TZ': 'Tanzania, United Republic of', 'TH': 'Thailand', 'TL': 'Timor-Leste', 'TG': 'Togo', 'TK': 'Tokelau', 'TO': 'Tonga', 'TT': 'Trinidad and Tobago', 'TN': 'Tunisia', 'TR': 'Turkey', 'TM': 'Turkmenistan', 'TC': 'Turks and Caicos Islands', 'TV': 'Tuvalu', 'UG': 'Uganda', 'UA': 'Ukraine', 'AE': 'United Arab Emirates', 'GB': 'United Kingdom', 'US': 'United States', 'UM': 'United States Minor Outlying Islands', 'UY': 'Uruguay', 'UZ': 'Uzbekistan', 'VU': 'Vanuatu', 'VE': 'Venezuela, Bolivarian Republic of', 'VN': 'Viet Nam', 'VG': 'Virgin Islands, British', 'VI': 'Virgin Islands, U.S.', 'WF': 'Wallis and Futuna', 'EH': 
'Western Sahara', 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', } @classmethod def short2full(cls, code): """Convert an ISO 3166-2 country code to the corresponding full name""" return cls._country_map.get(code.upper()) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) return compat_urllib_request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') if req_proxy is not None: proxy = req_proxy del req.headers['Ytdl-request-proxy'] if proxy == '__noproxy__': return None # No Proxy return compat_urllib_request.ProxyHandler.proxy_open( self, req, proxy, type)
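# --- Editor's illustrative sketch (not part of the original record) ---
# The utils module above bundles many small parsers; a few hedged usage
# examples, assuming the file is importable as youtube_dl.utils. The expected
# values were derived by hand from the implementations shown in this record
# and should be re-checked against the exact version in use.
from youtube_dl.utils import parse_duration, parse_filesize, sanitize_filename

assert parse_duration('1:02:03') == 3723      # hours/mins/secs branch
assert parse_duration('9 minutes') == 540.0   # only_mins branch, scaled by 60
assert parse_filesize('1,24 KiB') == 1269     # comma accepted as decimal mark
# Timestamp-like runs keep their digits but lose the colons:
assert sanitize_filename('New World record at 0:12:34') == 'New World record at 0_12_34'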
BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-29-fixed/youtube-dl/youtube_dl/utils.py,BugsInPy/BugsInPy/temp/projects/youtube-dl/bug-29-buggy/youtube-dl/youtube_dl/utils.py
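# --- Editor's illustrative sketch (not part of the original record) ---
# Two more helpers from the youtube-dl record above, again assuming the module
# is importable as youtube_dl.utils: smuggle_url/unsmuggle_url round-trip extra
# data through the URL fragment, and js_to_json relaxes JavaScript-style object
# literals into strict JSON.
import json
from youtube_dl.utils import smuggle_url, unsmuggle_url, js_to_json

url, data = unsmuggle_url(smuggle_url('http://example.com/v', {'a': 1}))
assert url == 'http://example.com/v' and data == {'a': 1}

# Single-quoted keys become double-quoted and the trailing comma is dropped:
assert json.loads(js_to_json("{'key': true, 'n': 1,}")) == {'key': True, 'n': 1}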
thefuck-bug-5
import re
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script_parts
            and 'set-upstream' in command.output)


def _get_upstream_option_index(command_parts):
    if '--set-upstream' in command_parts:
        return command_parts.index('--set-upstream')
    elif '-u' in command_parts:
        return command_parts.index('-u')
    else:
        return None


@git_support
def get_new_command(command):
    # If --set-upstream or -u are passed, remove it and its argument. This is
    # because the remaining arguments are concatenated onto the command suggested
    # by git, which includes --set-upstream and its argument
    command_parts = command.script_parts[:]
    upstream_option_index = _get_upstream_option_index(command_parts)

    if upstream_option_index is not None:
        command_parts.pop(upstream_option_index)

        # In case of `git push -u` we don't have next argument:
        if len(command_parts) > upstream_option_index:
            command_parts.pop(upstream_option_index)
    else:
        # the only non-qualified permitted options are the repository and refspec;
        # git's suggestion includes them, so they won't be lost, but would be
        # duplicated otherwise.
        push_idx = command_parts.index('push') + 1
        while len(command_parts) > push_idx and command_parts[len(command_parts) - 1][0] != '-':
            command_parts.pop(len(command_parts) - 1)

    arguments = re.findall(r'git push (.*)', command.output)[0].replace("'", r"\'").strip()
    return replace_argument(" ".join(command_parts), 'push',
                            'push {}'.format(arguments))


import re
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script_parts
            and 'git push --set-upstream' in command.output)


def _get_upstream_option_index(command_parts):
    if '--set-upstream' in command_parts:
        return command_parts.index('--set-upstream')
    elif '-u' in command_parts:
        return command_parts.index('-u')
    else:
        return None


@git_support
def get_new_command(command):
    # If --set-upstream or -u are passed, remove it and its argument. This is
    # because the remaining arguments are concatenated onto the command suggested
    # by git, which includes --set-upstream and its argument
    command_parts = command.script_parts[:]
    upstream_option_index = _get_upstream_option_index(command_parts)

    if upstream_option_index is not None:
        command_parts.pop(upstream_option_index)

        # In case of `git push -u` we don't have next argument:
        if len(command_parts) > upstream_option_index:
            command_parts.pop(upstream_option_index)
    else:
        # the only non-qualified permitted options are the repository and refspec;
        # git's suggestion includes them, so they won't be lost, but would be
        # duplicated otherwise.
        push_idx = command_parts.index('push') + 1
        while len(command_parts) > push_idx and command_parts[len(command_parts) - 1][0] != '-':
            command_parts.pop(len(command_parts) - 1)

    arguments = re.findall(r'git push (.*)', command.output)[0].replace("'", r"\'").strip()
    return replace_argument(" ".join(command_parts), 'push',
                            'push {}'.format(arguments))
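# --- Editor's illustrative sketch (not part of the record above) ---
# The core move of the rule: lift git's own suggestion out of the error output
# and graft it onto the user's `push`. A self-contained replay with plain
# str.replace standing in for thefuck's replace_argument; the output string
# mirrors git's usual no-upstream message.
import re

output = ('fatal: The current branch master has no upstream branch.\n'
          'To push the current branch and set the remote as upstream, use\n'
          '\n'
          '    git push --set-upstream origin master\n')
suggestion = re.findall(r'git push (.*)', output)[0].strip()
assert 'git push'.replace('push', 'push {}'.format(suggestion), 1) == \
    'git push --set-upstream origin master'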
BugsInPy/BugsInPy/temp/projects/thefuck/bug-5-fixed/thefuck/thefuck/rules/git_push.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-5-buggy/thefuck/thefuck/rules/git_push.py
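The two copies of rules/git_push.py in this row differ only in the match predicate: one accepts any output containing 'set-upstream', the other requires the literal suggestion 'git push --set-upstream'. The looser check can misfire on git's usage text, which lists the -u/--set-upstream option; a sketch with a hypothetical usage dump (not taken from the dataset):

usage = "error: unknown option `quiet'\nusage: git push ...\n    -u, --set-upstream    set upstream for git pull/status\n"
print('set-upstream' in usage)             # True  -- loose predicate fires on usage text
print('git push --set-upstream' in usage)  # False -- strict predicate does not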
thefuck-bug-12
from difflib import get_close_matches
from thefuck.utils import get_all_executables, \
    get_valid_history_without_current, get_closest
from thefuck.specific.sudo import sudo_support


@sudo_support
def match(command):
    return (command.script_parts
            and 'not found' in command.stderr
            and bool(get_close_matches(command.script_parts[0],
                                       get_all_executables())))


def _get_used_executables(command):
    for script in get_valid_history_without_current(command):
        yield script.split(' ')[0]


@sudo_support
def get_new_command(command):
    old_command = command.script_parts[0]

    # One from history:
    already_used = get_closest(
        old_command, _get_used_executables(command),
        fallback_to_first=False)
    if already_used:
        new_cmds = [already_used]
    else:
        new_cmds = []

    # Other from all executables:
    new_cmds += [cmd for cmd in get_close_matches(old_command,
                                                  get_all_executables())
                 if cmd not in new_cmds]

    return [' '.join([new_command] + command.script_parts[1:])
            for new_command in new_cmds]


priority = 3000


from difflib import get_close_matches
from thefuck.utils import get_all_executables, \
    get_valid_history_without_current, get_closest, which
from thefuck.specific.sudo import sudo_support


@sudo_support
def match(command):
    return (not which(command.script_parts[0])
            and 'not found' in command.stderr
            and bool(get_close_matches(command.script_parts[0],
                                       get_all_executables())))


def _get_used_executables(command):
    for script in get_valid_history_without_current(command):
        yield script.split(' ')[0]


@sudo_support
def get_new_command(command):
    old_command = command.script_parts[0]

    # One from history:
    already_used = get_closest(
        old_command, _get_used_executables(command),
        fallback_to_first=False)
    if already_used:
        new_cmds = [already_used]
    else:
        new_cmds = []

    # Other from all executables:
    new_cmds += [cmd for cmd in get_close_matches(old_command,
                                                  get_all_executables())
                 if cmd not in new_cmds]

    return [' '.join([new_command] + command.script_parts[1:])
            for new_command in new_cmds]


priority = 3000
BugsInPy/BugsInPy/temp/projects/thefuck/bug-12-fixed/thefuck/thefuck/rules/no_command.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-12-buggy/thefuck/thefuck/rules/no_command.py
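In rules/no_command.py the two copies differ in the match guard: one only requires a non-empty script and 'not found' in stderr, the other first checks that the typed binary does not resolve via which, so commands that exist but happen to print 'not found' are left alone. A small sketch using shutil.which as a stand-in for thefuck.utils.which:

from shutil import which  # stand-in for thefuck.utils.which

def should_fire(script_parts, stderr):
    return bool(script_parts) and not which(script_parts[0]) and 'not found' in stderr

print(should_fire(['qweqwe'], 'qweqwe: command not found'))  # True on a typical system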
thefuck-bug-32
def match(command, settings):
    return 'ls' in command.script and not ('ls -' in command.script)


def get_new_command(command, settings):
    command = command.script.split(' ')
    command[0] = 'ls -lah'
    return ' '.join(command)


def match(command, settings):
    return (command.script == 'ls'
            or command.script.startswith('ls ')
            and not ('ls -' in command.script))


def get_new_command(command, settings):
    command = command.script.split(' ')
    command[0] = 'ls -lah'
    return ' '.join(command)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-32-fixed/thefuck/thefuck/rules/ls_lah.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-32-buggy/thefuck/thefuck/rules/ls_lah.py
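The ls_lah pair above shows a classic over-broad substring match: 'ls' in command.script also fires for lsof, pacman -S binutils, and so on, while the stricter form requires the command to be exactly ls or to start with 'ls '. A runnable comparison (the Command namedtuple is a stand-in for thefuck's type):

from collections import namedtuple

Command = namedtuple('Command', 'script')  # stand-in

def match_loose(c):
    return 'ls' in c.script and not ('ls -' in c.script)

def match_strict(c):
    return c.script == 'ls' or c.script.startswith('ls ') and not ('ls -' in c.script)

for s in ('ls', 'ls foo', 'lsof', 'pacman -S binutils'):
    print(s, match_loose(Command(s)), match_strict(Command(s)))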
thefuck-bug-28
import re import os from thefuck.utils import memoize from thefuck import shells patterns = ( # js, node: '^ at {file}:{line}:{col}', # cargo: '^ {file}:{line}:{col}', # python, thefuck: '^ File "{file}", line {line}', # awk: '^awk: {file}:{line}:', # git '^fatal: bad config file line {line} in {file}', # llc: '^llc: {file}:{line}:{col}:', # lua: '^lua: {file}:{line}:', # fish: '^{file} \(line {line}\):', # bash, sh, ssh: '^{file}: line {line}: ', # ghc, make, ruby, zsh: '^{file}:{line}:', # cargo, clang, gcc, go, pep8, rustc: '^{file}:{line}:{col}', # perl: 'at {file} line {line}', ) # for the sake of readability do not use named groups above def _make_pattern(pattern): pattern = pattern.replace('{file}', '(?P<file>[^:\n]+)') pattern = pattern.replace('{line}', '(?P<line>[0-9]+)') pattern = pattern.replace('{col}', '(?P<col>[0-9]+)') return re.compile(pattern, re.MULTILINE) patterns = [_make_pattern(p) for p in patterns] @memoize def _search(stderr): for pattern in patterns: m = re.search(pattern, stderr) if m and os.path.isfile(m.group('file')): return m def match(command, settings): if 'EDITOR' not in os.environ: return False return _search(command.stderr) or _search(command.stdout) def get_new_command(command, settings): m = _search(command.stderr) or _search(command.stdout) # Note: there does not seem to be a standard for columns, so they are just # ignored for now editor_call = '{} {} +{}'.format(os.environ['EDITOR'], m.group('file'), m.group('line')) return shells.and_(editor_call, command.script) import re import os from thefuck.utils import memoize, wrap_settings from thefuck import shells # order is important: only the first match is considered patterns = ( # js, node: '^ at {file}:{line}:{col}', # cargo: '^ {file}:{line}:{col}', # python, thefuck: '^ File "{file}", line {line}', # awk: '^awk: {file}:{line}:', # git '^fatal: bad config file line {line} in {file}', # llc: '^llc: {file}:{line}:{col}:', # lua: '^lua: {file}:{line}:', # fish: '^{file} \\(line {line}\\):', # bash, sh, ssh: '^{file}: line {line}: ', # cargo, clang, gcc, go, pep8, rustc: '^{file}:{line}:{col}', # ghc, make, ruby, zsh: '^{file}:{line}:', # perl: 'at {file} line {line}', ) # for the sake of readability do not use named groups above def _make_pattern(pattern): pattern = pattern.replace('{file}', '(?P<file>[^:\n]+)') pattern = pattern.replace('{line}', '(?P<line>[0-9]+)') pattern = pattern.replace('{col}', '(?P<col>[0-9]+)') return re.compile(pattern, re.MULTILINE) patterns = [_make_pattern(p) for p in patterns] @memoize def _search(stderr): for pattern in patterns: m = re.search(pattern, stderr) if m and os.path.isfile(m.group('file')): return m def match(command, settings): if 'EDITOR' not in os.environ: return False return _search(command.stderr) or _search(command.stdout) @wrap_settings({'fixlinecmd': '{editor} {file} +{line}', 'fixcolcmd': None}) def get_new_command(command, settings): m = _search(command.stderr) or _search(command.stdout) # Note: there does not seem to be a standard for columns, so they are just # ignored by default if settings.fixcolcmd and 'col' in m.groupdict(): editor_call = settings.fixcolcmd.format(editor=os.environ['EDITOR'], file=m.group('file'), line=m.group('line'), col=m.group('col')) else: editor_call = settings.fixlinecmd.format(editor=os.environ['EDITOR'], file=m.group('file'), line=m.group('line')) return shells.and_(editor_call, command.script)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-28-fixed/thefuck/thefuck/rules/fix_file.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-28-buggy/thefuck/thefuck/rules/fix_file.py
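The fix_file.py copies differ in three spots: the fish pattern's parentheses escaping ('\\(line {line}\\)'), the relative order of the ghc/make and cargo/clang patterns (only the first match counts), and a settings-driven fixlinecmd/fixcolcmd pair instead of a hard-coded editor call. The shared pattern machinery is easy to exercise in isolation (make_pattern mirrors the row's _make_pattern; the compiler message is illustrative):

import re

def make_pattern(pattern):
    pattern = pattern.replace('{file}', '(?P<file>[^:\n]+)')
    pattern = pattern.replace('{line}', '(?P<line>[0-9]+)')
    pattern = pattern.replace('{col}', '(?P<col>[0-9]+)')
    return re.compile(pattern, re.MULTILINE)

m = make_pattern('^{file}:{line}:{col}').search('main.c:12:7: error: expected ;')
print(m.group('file'), m.group('line'), m.group('col'))  # main.c 12 7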
thefuck-bug-24
from collections import namedtuple from traceback import format_stack from .logs import debug Command = namedtuple('Command', ('script', 'stdout', 'stderr')) CorrectedCommand = namedtuple('CorrectedCommand', ('script', 'side_effect', 'priority')) Rule = namedtuple('Rule', ('name', 'match', 'get_new_command', 'enabled_by_default', 'side_effect', 'priority', 'requires_output')) class RulesNamesList(list): """Wrapper a top of list for storing rules names.""" def __contains__(self, item): return super(RulesNamesList, self).__contains__(item.name) class Settings(dict): def __getattr__(self, item): return self.get(item) def update(self, **kwargs): """ Returns new settings with values from `kwargs` for unset settings. """ conf = dict(kwargs) conf.update(self) return Settings(conf) class SortedCorrectedCommandsSequence(object): """List-like collection/wrapper around generator, that: - immediately gives access to the first commands through []; - realises generator and sorts commands on first access to other commands through [], or when len called. """ def __init__(self, commands, settings): self._settings = settings self._commands = commands self._cached = self._get_first_two_unique() self._realised = False def _get_first_two_unique(self): """Returns first two unique commands.""" try: first = next(self._commands) except StopIteration: return [] for command in self._commands: if command.script != first.script or \ command.side_effect != first.side_effect: return [first, command] return [first] def _remove_duplicates(self, corrected_commands): """Removes low-priority duplicates.""" commands = {(command.script, command.side_effect): command for command in sorted(corrected_commands, key=lambda command: -command.priority) if command.script != self._cached[0].script or command.side_effect != self._cached[0].side_effect} return commands.values() def _realise(self): """Realises generator, removes duplicates and sorts commands.""" commands = self._cached[1:] + list(self._commands) commands = self._remove_duplicates(commands) self._cached = [self._cached[0]] + sorted( commands, key=lambda corrected_command: corrected_command.priority) self._realised = True debug('SortedCommandsSequence was realised with: {}, after: {}'.format( self._cached, '\n'.join(format_stack())), self._settings) def __getitem__(self, item): if item != 0 and not self._realised: self._realise() return self._cached[item] def __bool__(self): return bool(self._cached) def __len__(self): if not self._realised: self._realise() return len(self._cached) def __iter__(self): if not self._realised: self._realise() return iter(self._cached) @property def is_multiple(self): return len(self._cached) > 1 from collections import namedtuple from traceback import format_stack from .logs import debug Command = namedtuple('Command', ('script', 'stdout', 'stderr')) Rule = namedtuple('Rule', ('name', 'match', 'get_new_command', 'enabled_by_default', 'side_effect', 'priority', 'requires_output')) class CorrectedCommand(object): def __init__(self, script, side_effect, priority): self.script = script self.side_effect = side_effect self.priority = priority def __eq__(self, other): """Ignores `priority` field.""" if isinstance(other, CorrectedCommand): return (other.script, other.side_effect) ==\ (self.script, self.side_effect) else: return False def __hash__(self): return (self.script, self.side_effect).__hash__() def __repr__(self): return 'CorrectedCommand(script={}, side_effect={}, priority={})'.format( self.script, self.side_effect, self.priority) class 
RulesNamesList(list): """Wrapper a top of list for storing rules names.""" def __contains__(self, item): return super(RulesNamesList, self).__contains__(item.name) class Settings(dict): def __getattr__(self, item): return self.get(item) def update(self, **kwargs): """ Returns new settings with values from `kwargs` for unset settings. """ conf = dict(kwargs) conf.update(self) return Settings(conf) class SortedCorrectedCommandsSequence(object): """List-like collection/wrapper around generator, that: - immediately gives access to the first commands through []; - realises generator and sorts commands on first access to other commands through [], or when len called. """ def __init__(self, commands, settings): self._settings = settings self._commands = commands self._cached = self._get_first_two_unique() self._realised = False def _get_first_two_unique(self): """Returns first two unique commands.""" try: first = next(self._commands) except StopIteration: return [] for command in self._commands: if command != first: return [first, command] return [first] def _remove_duplicates(self, corrected_commands): """Removes low-priority duplicates.""" commands = {command for command in sorted(corrected_commands, key=lambda command: -command.priority) if command.script != self._cached[0]} return commands def _realise(self): """Realises generator, removes duplicates and sorts commands.""" commands = self._cached[1:] + list(self._commands) commands = self._remove_duplicates(commands) self._cached = [self._cached[0]] + sorted( commands, key=lambda corrected_command: corrected_command.priority) self._realised = True debug('SortedCommandsSequence was realised with: {}, after: {}'.format( self._cached, '\n'.join(format_stack())), self._settings) def __getitem__(self, item): if item != 0 and not self._realised: self._realise() return self._cached[item] def __bool__(self): return bool(self._cached) def __len__(self): if not self._realised: self._realise() return len(self._cached) def __iter__(self): if not self._realised: self._realise() return iter(self._cached) @property def is_multiple(self): return len(self._cached) > 1
BugsInPy/BugsInPy/temp/projects/thefuck/bug-24-fixed/thefuck/thefuck/types.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-24-buggy/thefuck/thefuck/types.py
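In types.py the change replaces the CorrectedCommand namedtuple with a class whose __eq__ and __hash__ ignore priority, so deduplication becomes a plain set operation instead of a hand-built dict keyed on (script, side_effect). A distilled sketch of that identity rule:

class CorrectedCommand(object):
    def __init__(self, script, side_effect, priority):
        self.script, self.side_effect, self.priority = script, side_effect, priority

    def __eq__(self, other):
        # priority deliberately excluded from identity
        return (other.script, other.side_effect) == (self.script, self.side_effect)

    def __hash__(self):
        return hash((self.script, self.side_effect))

a, b = CorrectedCommand('ls', None, 100), CorrectedCommand('ls', None, 900)
print(a == b, len({a, b}))  # True 1 -- duplicates collapse regardless of priority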
thefuck-bug-3
from subprocess import Popen, PIPE from time import time import os import sys import six from .. import logs from ..conf import settings from ..utils import DEVNULL, cache from .generic import Generic @cache('~/.config/fish/config.fish', '~/.config/fish/functions') def _get_functions(overridden): proc = Popen(['fish', '-ic', 'functions'], stdout=PIPE, stderr=DEVNULL) functions = proc.stdout.read().decode('utf-8').strip().split('\n') return {func: func for func in functions if func not in overridden} @cache('~/.config/fish/config.fish') def _get_aliases(overridden): aliases = {} proc = Popen(['fish', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL) alias_out = proc.stdout.read().decode('utf-8').strip() if not alias_out: return aliases for alias in alias_out.split('\n'): for separator in (' ', '='): split_alias = alias.replace('alias ', '', 1).split(separator, 1) if len(split_alias) == 2: name, value = split_alias break else: continue if name not in overridden: aliases[name] = value return aliases class Fish(Generic): def _get_overridden_aliases(self): overridden = os.environ.get('THEFUCK_OVERRIDDEN_ALIASES', os.environ.get('TF_OVERRIDDEN_ALIASES', '')) default = {'cd', 'grep', 'ls', 'man', 'open'} for alias in overridden.split(','): default.add(alias.strip()) return default def app_alias(self, alias_name): if settings.alter_history: alter_history = (' builtin history delete --exact' ' --case-sensitive -- $fucked_up_command\n' ' builtin history merge ^ /dev/null\n') else: alter_history = '' # It is VERY important to have the variables declared WITHIN the alias return ('function {0} -d "Correct your previous console command"\n' ' set -l fucked_up_command $history[1]\n' ' env TF_SHELL=fish TF_ALIAS={0} PYTHONIOENCODING=utf-8' ' thefuck $fucked_up_command | read -l unfucked_command\n' ' if [ "$unfucked_command" != "" ]\n' ' eval $unfucked_command\n{1}' ' end\n' 'end').format(alias_name, alter_history) def get_aliases(self): overridden = self._get_overridden_aliases() functions = _get_functions(overridden) raw_aliases = _get_aliases(overridden) functions.update(raw_aliases) return functions def _expand_aliases(self, command_script): aliases = self.get_aliases() binary = command_script.split(' ')[0] if binary in aliases and aliases[binary] != binary: return command_script.replace(binary, aliases[binary], 1) elif binary in aliases: return u'fish -ic "{}"'.format(command_script.replace('"', r'\"')) else: return command_script def _get_history_file_name(self): return os.path.expanduser('~/.config/fish/fish_history') def _get_history_line(self, command_script): return u'- cmd: {}\n when: {}\n'.format(command_script, int(time())) def _script_from_history(self, line): if '- cmd: ' in line: return line.split('- cmd: ', 1)[1] else: return '' def and_(self, *commands): return u'; and '.join(commands) def or_(self, *commands): return u'; or '.join(commands) def how_to_configure(self): return self._create_shell_configuration( content=u"thefuck --alias | source", path='~/.config/fish/config.fish', reload='fish') def info(self): """Returns the name and version of the current shell""" proc = Popen(['fish', '-c', 'echo $FISH_VERSION'], stdout=PIPE, stderr=DEVNULL) version = proc.stdout.read().decode('utf-8').strip() return u'Fish Shell {}'.format(version) def put_to_history(self, command): try: return self._put_to_history(command) except IOError: logs.exception("Can't update history", sys.exc_info()) def _put_to_history(self, command_script): """Puts command script to shell history.""" history_file_name = 
self._get_history_file_name() if os.path.isfile(history_file_name): with open(history_file_name, 'a') as history: entry = self._get_history_line(command_script) if six.PY2: history.write(entry.encode('utf-8')) else: history.write(entry) from subprocess import Popen, PIPE from time import time import os import sys import six from .. import logs from ..conf import settings from ..utils import DEVNULL, cache from .generic import Generic @cache('~/.config/fish/config.fish', '~/.config/fish/functions') def _get_functions(overridden): proc = Popen(['fish', '-ic', 'functions'], stdout=PIPE, stderr=DEVNULL) functions = proc.stdout.read().decode('utf-8').strip().split('\n') return {func: func for func in functions if func not in overridden} @cache('~/.config/fish/config.fish') def _get_aliases(overridden): aliases = {} proc = Popen(['fish', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL) alias_out = proc.stdout.read().decode('utf-8').strip() if not alias_out: return aliases for alias in alias_out.split('\n'): for separator in (' ', '='): split_alias = alias.replace('alias ', '', 1).split(separator, 1) if len(split_alias) == 2: name, value = split_alias break else: continue if name not in overridden: aliases[name] = value return aliases class Fish(Generic): def _get_overridden_aliases(self): overridden = os.environ.get('THEFUCK_OVERRIDDEN_ALIASES', os.environ.get('TF_OVERRIDDEN_ALIASES', '')) default = {'cd', 'grep', 'ls', 'man', 'open'} for alias in overridden.split(','): default.add(alias.strip()) return default def app_alias(self, alias_name): if settings.alter_history: alter_history = (' builtin history delete --exact' ' --case-sensitive -- $fucked_up_command\n' ' builtin history merge ^ /dev/null\n') else: alter_history = '' # It is VERY important to have the variables declared WITHIN the alias return ('function {0} -d "Correct your previous console command"\n' ' set -l fucked_up_command $history[1]\n' ' env TF_SHELL=fish TF_ALIAS={0} PYTHONIOENCODING=utf-8' ' thefuck $fucked_up_command | read -l unfucked_command\n' ' if [ "$unfucked_command" != "" ]\n' ' eval $unfucked_command\n{1}' ' end\n' 'end').format(alias_name, alter_history) def get_aliases(self): overridden = self._get_overridden_aliases() functions = _get_functions(overridden) raw_aliases = _get_aliases(overridden) functions.update(raw_aliases) return functions def _expand_aliases(self, command_script): aliases = self.get_aliases() binary = command_script.split(' ')[0] if binary in aliases and aliases[binary] != binary: return command_script.replace(binary, aliases[binary], 1) elif binary in aliases: return u'fish -ic "{}"'.format(command_script.replace('"', r'\"')) else: return command_script def _get_history_file_name(self): return os.path.expanduser('~/.config/fish/fish_history') def _get_history_line(self, command_script): return u'- cmd: {}\n when: {}\n'.format(command_script, int(time())) def _script_from_history(self, line): if '- cmd: ' in line: return line.split('- cmd: ', 1)[1] else: return '' def and_(self, *commands): return u'; and '.join(commands) def or_(self, *commands): return u'; or '.join(commands) def how_to_configure(self): return self._create_shell_configuration( content=u"thefuck --alias | source", path='~/.config/fish/config.fish', reload='fish') def info(self): """Returns the name and version of the current shell""" proc = Popen(['fish', '--version'], stdout=PIPE, stderr=DEVNULL) version = proc.stdout.read().decode('utf-8').split()[-1] return u'Fish Shell {}'.format(version) def put_to_history(self, command): 
try: return self._put_to_history(command) except IOError: logs.exception("Can't update history", sys.exc_info()) def _put_to_history(self, command_script): """Puts command script to shell history.""" history_file_name = self._get_history_file_name() if os.path.isfile(history_file_name): with open(history_file_name, 'a') as history: entry = self._get_history_line(command_script) if six.PY2: history.write(entry.encode('utf-8')) else: history.write(entry)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-3-fixed/thefuck/thefuck/shells/fish.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-3-buggy/thefuck/thefuck/shells/fish.py
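The substantive difference between the two shells/fish.py copies is Fish.info(): one shells out to fish -c 'echo $FISH_VERSION' and strips the result, the other runs fish --version and keeps the last whitespace-separated token. The parsing step for the latter, with a typical output line (illustrative, not captured from fish):

out = 'fish, version 3.1.2'
print(u'Fish Shell {}'.format(out.split()[-1]))  # Fish Shell 3.1.2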
thefuck-bug-15
import re

from thefuck.shells import shell
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('did not match any file(s) known to git.' in command.stderr
            and "Did you forget to 'git add'?" in command.stderr)


@git_support
def get_new_command(command):
    missing_file = re.findall(
        r"error: pathspec '([^']*)' "
        r"did not match any file\(s\) known to git.", command.stderr)[0]
    formatme = shell.and_('git add -- {}', '{}')
    return formatme.format(missing_file, command.script)


import re

from thefuck.shells import shell
from thefuck.specific.git import git_support


@git_support
def match(command):
    return 'did not match any file(s) known to git.' in command.stderr


@git_support
def get_new_command(command):
    missing_file = re.findall(
        r"error: pathspec '([^']*)' "
        r'did not match any file\(s\) known to git.', command.stderr)[0]
    formatme = shell.and_('git add -- {}', '{}')
    return formatme.format(missing_file, command.script)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-15-fixed/thefuck/thefuck/rules/git_add.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-15-buggy/thefuck/thefuck/rules/git_add.py
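The git_add.py copies differ in how much of git's hint the match requires: one also insists on "Did you forget to 'git add'?", the other fires on the pathspec error alone. The extraction regex is shared between them and can be checked directly (the path in stderr is a made-up example):

import re

stderr = "error: pathspec 'src/app.py' did not match any file(s) known to git."
print(re.findall(r"error: pathspec '([^']*)' "
                 r"did not match any file\(s\) known to git.", stderr)[0])  # src/app.py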
thefuck-bug-31
from thefuck import utils


@utils.git_support
def match(command, settings):
    return ('git' in command.script and
            'diff' in command.script and
            '--staged' not in command.script)


@utils.git_support
def get_new_command(command, settings):
    return '{} --staged'.format(command.script)


from thefuck import utils


@utils.git_support
def match(command, settings):
    return ('git' in command.script and
            'diff' in command.script and
            '--staged' not in command.script)


@utils.git_support
def get_new_command(command, settings):
    return command.script.replace(' diff', ' diff --staged')
BugsInPy/BugsInPy/temp/projects/thefuck/bug-31-fixed/thefuck/thefuck/rules/git_diff_staged.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-31-buggy/thefuck/thefuck/rules/git_diff_staged.py
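For git_diff_staged.py the two get_new_command bodies place --staged differently: appending '{} --staged' puts the flag after any pathspec, while replace(' diff', ' diff --staged') keeps it next to the subcommand. On a command with a path argument:

script = 'git diff foo'
print('{} --staged'.format(script))               # git diff foo --staged
print(script.replace(' diff', ' diff --staged'))  # git diff --staged foo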
thefuck-bug-11
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script
            and 'set-upstream' in command.stderr)


@git_support
def get_new_command(command):
    push_upstream = command.stderr.split('\n')[-3].strip().partition('git ')[2]
    return replace_argument(command.script, 'push', push_upstream)


from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script
            and 'set-upstream' in command.stderr)


@git_support
def get_new_command(command):
    push_upstream = command.stderr.split('\n')[-3].strip().partition('git ')[2]
    return replace_argument(command.script, 'push', push_upstream)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-11-fixed/thefuck/thefuck/rules/git_push.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-11-buggy/thefuck/thefuck/rules/git_push.py
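Note that the two git_push.py copies in this row read identically, even though the path line pairs a fixed and a buggy revision; the actual delta for this bug may live outside the excerpt. The parsing trick both copies share pulls git's suggested command out of the third-from-last stderr line (the stderr text below mirrors git's real hint format):

stderr = ('fatal: The current branch master has no upstream branch.\n'
          'To push the current branch and set the remote as upstream, use\n'
          '\n'
          '    git push --set-upstream origin master\n'
          '\n')
print(stderr.split('\n')[-3].strip().partition('git ')[2])  # push --set-upstream origin master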
thefuck-bug-6
import re

from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.utils import eager


@git_support
def match(command):
    return ("fatal: A branch named '" in command.output
            and " already exists." in command.output)


@git_support
@eager
def get_new_command(command):
    branch_name = re.findall(
        r"fatal: A branch named '([^']*)' already exists.", command.output)[0]
    new_command_templates = [['git branch -d {0}', 'git branch {0}'],
                             ['git branch -d {0}', 'git checkout -b {0}'],
                             ['git branch -D {0}', 'git branch {0}'],
                             ['git branch -D {0}', 'git checkout -b {0}'],
                             ['git checkout {0}']]
    for new_command_template in new_command_templates:
        yield shell.and_(*new_command_template).format(branch_name)


import re

from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.utils import eager


@git_support
def match(command):
    return ("fatal: A branch named '" in command.output
            and "' already exists." in command.output)


@git_support
@eager
def get_new_command(command):
    branch_name = re.findall(
        r"fatal: A branch named '(.+)' already exists.", command.output)[0]
    branch_name = branch_name.replace("'", r"\'")
    new_command_templates = [['git branch -d {0}', 'git branch {0}'],
                             ['git branch -d {0}', 'git checkout -b {0}'],
                             ['git branch -D {0}', 'git branch {0}'],
                             ['git branch -D {0}', 'git checkout -b {0}'],
                             ['git checkout {0}']]
    for new_command_template in new_command_templates:
        yield shell.and_(*new_command_template).format(branch_name)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-6-fixed/thefuck/thefuck/rules/git_branch_exists.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-6-buggy/thefuck/thefuck/rules/git_branch_exists.py
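The git_branch_exists.py pair tightens quoting: the capture group widens from '([^']*)' to '(.+)' and the captured branch name gets its single quotes escaped before being spliced into shell templates, so a branch name containing a quote survives the round trip:

branch_name = "let's-fix"
print(branch_name.replace("'", r"\'"))  # let\'s-fix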
thefuck-bug-16
import os from ..conf import settings from ..utils import memoize from .generic import Generic class Bash(Generic): def app_alias(self, fuck): alias = "TF_ALIAS={0}" \ " alias {0}='PYTHONIOENCODING=utf-8" \ " TF_CMD=$(TF_SHELL_ALIASES=$(alias) thefuck $(fc -ln -1)) && " \ " eval $TF_CMD".format(fuck) if settings.alter_history: return alias + " && history -s $TF_CMD'" else: return alias + "'" def _parse_alias(self, alias): name, value = alias.replace('alias ', '', 1).split('=', 1) if value[0] == value[-1] == '"' or value[0] == value[-1] == "'": value = value[1:-1] return name, value @memoize def get_aliases(self): raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n') return dict(self._parse_alias(alias) for alias in raw_aliases if alias and '=' in alias) def _get_history_file_name(self): return os.environ.get("HISTFILE", os.path.expanduser('~/.bash_history')) def _get_history_line(self, command_script): return u'{}\n'.format(command_script) def how_to_configure(self): if os.path.join(os.path.expanduser('~'), '.bashrc'): config = '~/.bashrc' elif os.path.join(os.path.expanduser('~'), '.bash_profile'): config = '~/.bashrc' else: config = 'bash config' return 'eval $(thefuck --alias)', config import os from ..conf import settings from ..utils import memoize from .generic import Generic class Bash(Generic): def app_alias(self, fuck): # It is VERY important to have the variables declared WITHIN the alias alias = "alias {0}='TF_CMD=$(TF_ALIAS={0}" \ " PYTHONIOENCODING=utf-8" \ " TF_SHELL_ALIASES=$(alias)" \ " thefuck $(fc -ln -1)) &&" \ " eval $TF_CMD".format(fuck) if settings.alter_history: return alias + " && history -s $TF_CMD'" else: return alias + "'" def _parse_alias(self, alias): name, value = alias.replace('alias ', '', 1).split('=', 1) if value[0] == value[-1] == '"' or value[0] == value[-1] == "'": value = value[1:-1] return name, value @memoize def get_aliases(self): raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n') return dict(self._parse_alias(alias) for alias in raw_aliases if alias and '=' in alias) def _get_history_file_name(self): return os.environ.get("HISTFILE", os.path.expanduser('~/.bash_history')) def _get_history_line(self, command_script): return u'{}\n'.format(command_script) def how_to_configure(self): if os.path.join(os.path.expanduser('~'), '.bashrc'): config = '~/.bashrc' elif os.path.join(os.path.expanduser('~'), '.bash_profile'): config = '~/.bashrc' else: config = 'bash config' return 'eval $(thefuck --alias)', config
BugsInPy/BugsInPy/temp/projects/thefuck/bug-16-fixed/thefuck/thefuck/shells/bash.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-16-buggy/thefuck/thefuck/shells/bash.py
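Both bash.py copies build the same alias body; they differ in where the helper variables are assigned. One sets TF_ALIAS outside the quoted alias, the other moves TF_ALIAS, PYTHONIOENCODING and TF_SHELL_ALIASES inside the command substitution, as its comment stresses, so they are evaluated per invocation rather than once at alias-definition time. Printing the template makes the quoting visible:

alias = ("alias {0}='TF_CMD=$(TF_ALIAS={0}"
         " PYTHONIOENCODING=utf-8"
         " TF_SHELL_ALIASES=$(alias)"
         " thefuck $(fc -ln -1)) &&"
         " eval $TF_CMD'").format('fuck')
print(alias)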
thefuck-bug-14
from subprocess import Popen, PIPE from time import time import os from ..utils import DEVNULL, memoize, cache from .generic import Generic class Fish(Generic): def _get_overridden_aliases(self): overridden_aliases = os.environ.get('TF_OVERRIDDEN_ALIASES', '').strip() if overridden_aliases: return [alias.strip() for alias in overridden_aliases.split(',')] else: return ['cd', 'grep', 'ls', 'man', 'open'] def app_alias(self, fuck): # It is VERY important to have the variables declared WITHIN the alias return ('function {0} -d "Correct your previous console command"\n' ' set -l fucked_up_command $history[1]\n' ' env TF_ALIAS={0} PYTHONIOENCODING=utf-8' ' thefuck $fucked_up_command | read -l unfucked_command\n' ' if [ "$unfucked_command" != "" ]\n' ' eval $unfucked_command\n' ' history --delete $fucked_up_command\n' ' history --merge ^ /dev/null\n' ' end\n' 'end').format(fuck) @memoize @cache('.config/fish/config.fish', '.config/fish/functions') def get_aliases(self): overridden = self._get_overridden_aliases() proc = Popen(['fish', '-ic', 'functions'], stdout=PIPE, stderr=DEVNULL) functions = proc.stdout.read().decode('utf-8').strip().split('\n') return {func: func for func in functions if func not in overridden} def _expand_aliases(self, command_script): aliases = self.get_aliases() binary = command_script.split(' ')[0] if binary in aliases: return u'fish -ic "{}"'.format(command_script.replace('"', r'\"')) else: return command_script def from_shell(self, command_script): """Prepares command before running in app.""" return self._expand_aliases(command_script) def _get_history_file_name(self): return os.path.expanduser('~/.config/fish/fish_history') def _get_history_line(self, command_script): return u'- cmd: {}\n when: {}\n'.format(command_script, int(time())) def _script_from_history(self, line): if '- cmd: ' in line: return line.split('- cmd: ', 1)[1] else: return '' def and_(self, *commands): return u'; and '.join(commands) def how_to_configure(self): return (r"eval (thefuck --alias | tr '\n' ';')", '~/.config/fish/config.fish') from subprocess import Popen, PIPE from time import time import os from ..utils import DEVNULL, memoize, cache from .generic import Generic class Fish(Generic): def _get_overridden_aliases(self): default = {'cd', 'grep', 'ls', 'man', 'open'} for alias in os.environ.get('TF_OVERRIDDEN_ALIASES', '').split(','): default.add(alias.strip()) return default def app_alias(self, fuck): # It is VERY important to have the variables declared WITHIN the alias return ('function {0} -d "Correct your previous console command"\n' ' set -l fucked_up_command $history[1]\n' ' env TF_ALIAS={0} PYTHONIOENCODING=utf-8' ' thefuck $fucked_up_command | read -l unfucked_command\n' ' if [ "$unfucked_command" != "" ]\n' ' eval $unfucked_command\n' ' history --delete $fucked_up_command\n' ' history --merge ^ /dev/null\n' ' end\n' 'end').format(fuck) @memoize @cache('.config/fish/config.fish', '.config/fish/functions') def get_aliases(self): overridden = self._get_overridden_aliases() proc = Popen(['fish', '-ic', 'functions'], stdout=PIPE, stderr=DEVNULL) functions = proc.stdout.read().decode('utf-8').strip().split('\n') return {func: func for func in functions if func not in overridden} def _expand_aliases(self, command_script): aliases = self.get_aliases() binary = command_script.split(' ')[0] if binary in aliases: return u'fish -ic "{}"'.format(command_script.replace('"', r'\"')) else: return command_script def from_shell(self, command_script): """Prepares command before running in app.""" 
return self._expand_aliases(command_script) def _get_history_file_name(self): return os.path.expanduser('~/.config/fish/fish_history') def _get_history_line(self, command_script): return u'- cmd: {}\n when: {}\n'.format(command_script, int(time())) def _script_from_history(self, line): if '- cmd: ' in line: return line.split('- cmd: ', 1)[1] else: return '' def and_(self, *commands): return u'; and '.join(commands) def how_to_configure(self): return (r"eval (thefuck --alias | tr '\n' ';')", '~/.config/fish/config.fish')
BugsInPy/BugsInPy/temp/projects/thefuck/bug-14-fixed/thefuck/thefuck/shells/fish.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-14-buggy/thefuck/thefuck/shells/fish.py
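In this fish.py pair _get_overridden_aliases changes from returning either the user's list or the defaults to returning a set that always contains the defaults and merges in TF_OVERRIDDEN_ALIASES. A sketch of the set-based variant (the empty-entry guard is a small addition for clean output, not in the row):

def overridden_aliases(raw):
    default = {'cd', 'grep', 'ls', 'man', 'open'}
    for alias in raw.split(','):
        if alias.strip():
            default.add(alias.strip())
    return default

print(sorted(overridden_aliases('vim, git')))  # defaults plus git and vim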
thefuck-bug-9
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script
            and 'set-upstream' in command.stderr)


@git_support
def get_new_command(command):
    # If --set-upstream or -u are passed, remove it and its argument. This is
    # because the remaining arguments are concatenated onto the command suggested
    # by git, which includes --set-upstream and its argument
    upstream_option_index = -1
    try:
        upstream_option_index = command.script_parts.index('--set-upstream')
    except ValueError:
        pass
    try:
        upstream_option_index = command.script_parts.index('-u')
    except ValueError:
        pass
    if upstream_option_index is not -1:
        command.script_parts.pop(upstream_option_index)
        command.script_parts.pop(upstream_option_index)

    push_upstream = command.stderr.split('\n')[-3].strip().partition('git ')[2]
    return replace_argument(" ".join(command.script_parts), 'push', push_upstream)


from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script
            and 'set-upstream' in command.stderr)


@git_support
def get_new_command(command):
    # If --set-upstream or -u are passed, remove it and its argument. This is
    # because the remaining arguments are concatenated onto the command suggested
    # by git, which includes --set-upstream and its argument
    upstream_option_index = -1
    try:
        upstream_option_index = command.script_parts.index('--set-upstream')
    except ValueError:
        pass
    try:
        upstream_option_index = command.script_parts.index('-u')
    except ValueError:
        pass
    if upstream_option_index is not -1:
        command.script_parts.pop(upstream_option_index)
        try:
            command.script_parts.pop(upstream_option_index)
        except IndexError:
            # This happens for `git push -u`
            pass

    push_upstream = command.stderr.split('\n')[-3].strip().partition('git ')[2]
    return replace_argument(" ".join(command.script_parts), 'push', push_upstream)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-9-fixed/thefuck/thefuck/rules/git_push.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-9-buggy/thefuck/thefuck/rules/git_push.py
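The delta here is a single guard: after removing --set-upstream/-u, popping the option's argument raises IndexError for a bare `git push -u`, so the second pop is wrapped in try/except. Reproduced in isolation:

parts = ['git', 'push', '-u']
i = parts.index('-u')
parts.pop(i)
try:
    parts.pop(i)  # no argument follows -u in `git push -u`
except IndexError:
    pass
print(parts)  # ['git', 'push']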
thefuck-bug-26
from thefuck import shells


def match(command, settings):
    return command.script.startswith('vagrant ') and 'run `vagrant up`' in command.stderr.lower()


def get_new_command(command, settings):
    cmds = command.script.split(' ')
    machine = ""
    if len(cmds) >= 3:
        machine = cmds[2]
    return shells.and_("vagrant up " + machine, command.script)


from thefuck import shells


def match(command, settings):
    return command.script.startswith('vagrant ') and 'run `vagrant up`' in command.stderr.lower()


def get_new_command(command, settings):
    cmds = command.script.split(' ')
    machine = None
    if len(cmds) >= 3:
        machine = cmds[2]

    startAllInstances = shells.and_("vagrant up", command.script)
    if machine is None:
        return startAllInstances
    else:
        return [shells.and_("vagrant up " + machine, command.script), startAllInstances]
BugsInPy/BugsInPy/temp/projects/thefuck/bug-26-fixed/thefuck/thefuck/rules/vagrant_up.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-26-buggy/thefuck/thefuck/rules/vagrant_up.py
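vagrant_up.py goes from always suggesting `vagrant up <machine>` (with an empty machine name when none was given) to offering both the machine-specific command and a plain `vagrant up` fallback. A compact restatement, using ' && ' in place of shells.and_:

def suggestions(script, machine):
    start_all = 'vagrant up && ' + script
    if machine is None:
        return start_all
    return ['vagrant up {} && {}'.format(machine, script), start_all]

print(suggestions('vagrant ssh devbox', 'devbox'))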
thefuck-bug-17
from subprocess import Popen, PIPE import os from ..conf import settings from ..utils import DEVNULL, memoize, cache from .generic import Generic class Bash(Generic): def app_alias(self, fuck): alias = "TF_ALIAS={0}" \ " alias {0}='PYTHONIOENCODING=utf-8" \ " TF_CMD=$(thefuck $(fc -ln -1)) && " \ " eval $TF_CMD".format(fuck) if settings.alter_history: return alias + " && history -s $TF_CMD'" else: return alias + "'" def _parse_alias(self, alias): name, value = alias.replace('alias ', '', 1).split('=', 1) if value[0] == value[-1] == '"' or value[0] == value[-1] == "'": value = value[1:-1] return name, value @memoize @cache('.bashrc', '.bash_profile') def get_aliases(self): proc = Popen(['bash', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL) return dict( self._parse_alias(alias) for alias in proc.stdout.read().decode('utf-8').split('\n') if alias and '=' in alias) def _get_history_file_name(self): return os.environ.get("HISTFILE", os.path.expanduser('~/.bash_history')) def _get_history_line(self, command_script): return u'{}\n'.format(command_script) def how_to_configure(self): if os.path.join(os.path.expanduser('~'), '.bashrc'): config = '~/.bashrc' elif os.path.join(os.path.expanduser('~'), '.bash_profile'): config = '~/.bashrc' else: config = 'bash config' return 'eval $(thefuck --alias)', config import os from ..conf import settings from ..utils import memoize from .generic import Generic class Bash(Generic): def app_alias(self, fuck): alias = "TF_ALIAS={0}" \ " alias {0}='PYTHONIOENCODING=utf-8" \ " TF_CMD=$(TF_SHELL_ALIASES=$(alias) thefuck $(fc -ln -1)) && " \ " eval $TF_CMD".format(fuck) if settings.alter_history: return alias + " && history -s $TF_CMD'" else: return alias + "'" def _parse_alias(self, alias): name, value = alias.replace('alias ', '', 1).split('=', 1) if value[0] == value[-1] == '"' or value[0] == value[-1] == "'": value = value[1:-1] return name, value @memoize def get_aliases(self): raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n') return dict(self._parse_alias(alias) for alias in raw_aliases if alias and '=' in alias) def _get_history_file_name(self): return os.environ.get("HISTFILE", os.path.expanduser('~/.bash_history')) def _get_history_line(self, command_script): return u'{}\n'.format(command_script) def how_to_configure(self): if os.path.join(os.path.expanduser('~'), '.bashrc'): config = '~/.bashrc' elif os.path.join(os.path.expanduser('~'), '.bash_profile'): config = '~/.bashrc' else: config = 'bash config' return 'eval $(thefuck --alias)', config
BugsInPy/BugsInPy/temp/projects/thefuck/bug-17-fixed/thefuck/thefuck/shells/bash.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-17-buggy/thefuck/thefuck/shells/bash.py
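This bash.py change swaps a subprocess call (`bash -ic alias`) for reading the TF_SHELL_ALIASES environment variable that the alias itself now exports, avoiding a slow interactive-shell spawn on every lookup; _parse_alias is unchanged and easy to verify:

def parse_alias(alias):
    name, value = alias.replace('alias ', '', 1).split('=', 1)
    if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
        value = value[1:-1]
    return name, value

print(parse_alias("alias ll='ls -alF'"))  # ('ll', 'ls -alF')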
thefuck-bug-21
from thefuck import utils
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return (command.script.split()[1] == 'stash'
            and 'usage:' in command.stderr)


# git's output here is too complicated to be parsed (see the test file)
stash_commands = (
    'apply', 'branch', 'clear', 'drop',
    'list', 'pop', 'save', 'show')


@git_support
def get_new_command(command):
    stash_cmd = command.script.split()[2]
    fixed = utils.get_closest(stash_cmd, stash_commands, fallback_to_first=False)

    if fixed is not None:
        return replace_argument(command.script, stash_cmd, fixed)
    else:
        cmd = command.script.split()
        cmd.insert(2, 'save')
        return ' '.join(cmd)


from thefuck import utils
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    splited_script = command.script.split()
    if len(splited_script) > 1:
        return (splited_script[1] == 'stash'
                and 'usage:' in command.stderr)
    else:
        return False


# git's output here is too complicated to be parsed (see the test file)
stash_commands = (
    'apply', 'branch', 'clear', 'drop',
    'list', 'pop', 'save', 'show')


@git_support
def get_new_command(command):
    stash_cmd = command.script.split()[2]
    fixed = utils.get_closest(stash_cmd, stash_commands, fallback_to_first=False)

    if fixed is not None:
        return replace_argument(command.script, stash_cmd, fixed)
    else:
        cmd = command.script.split()
        cmd.insert(2, 'save')
        return ' '.join(cmd)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-21-fixed/thefuck/thefuck/rules/git_fix_stash.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-21-buggy/thefuck/thefuck/rules/git_fix_stash.py
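git_fix_stash.py crashes in one copy when the script is just `git`, because command.script.split()[1] is evaluated unconditionally; the other copy checks the length first. The guard in miniature:

parts = 'git'.split()
print(len(parts) > 1 and parts[1] == 'stash')  # False, and no IndexError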
thefuck-bug-19
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script
            and '! [rejected]' in command.stderr
            and 'failed to push some refs to' in command.stderr
            and 'Updates were rejected because the tip of your current branch is behind' in command.stderr)


@git_support
def get_new_command(command):
    return replace_argument(command.script, 'push', 'push --force')


enabled_by_default = False


from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    return ('push' in command.script
            and '! [rejected]' in command.stderr
            and 'failed to push some refs to' in command.stderr
            and 'Updates were rejected because the tip of your current branch is behind' in command.stderr)


@git_support
def get_new_command(command):
    return replace_argument(command.script, 'push', 'push --force-with-lease')


enabled_by_default = False
BugsInPy/BugsInPy/temp/projects/thefuck/bug-19-fixed/thefuck/thefuck/rules/git_push_force.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-19-buggy/thefuck/thefuck/rules/git_push_force.py
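git_push_force.py swaps `push --force` for `push --force-with-lease`, which refuses to overwrite remote commits you have not fetched; the rule stays disabled by default in both copies. The substitution roughly amounts to (a simplification of thefuck.utils.replace_argument):

import re

script = 'git push origin master'
print(re.sub(r'\bpush\b', 'push --force-with-lease', script, count=1))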
thefuck-bug-10
from thefuck.utils import for_app


@for_app('man', at_least=1)
def match(command):
    return True


def get_new_command(command):
    if '3' in command.script:
        return command.script.replace("3", "2")
    if '2' in command.script:
        return command.script.replace("2", "3")

    split_cmd2 = command.script_parts
    split_cmd3 = split_cmd2[:]

    split_cmd2.insert(1, ' 2 ')
    split_cmd3.insert(1, ' 3 ')

    last_arg = command.script_parts[-1]

    return [
        last_arg + ' --help',
        "".join(split_cmd3),
        "".join(split_cmd2),
    ]


from thefuck.utils import for_app


@for_app('man', at_least=1)
def match(command):
    return True


def get_new_command(command):
    if '3' in command.script:
        return command.script.replace("3", "2")
    if '2' in command.script:
        return command.script.replace("2", "3")

    last_arg = command.script_parts[-1]
    help_command = last_arg + ' --help'

    # If there are no man pages for last_arg, suggest `last_arg --help` instead.
    # Otherwise, suggest `--help` after suggesting other man page sections.
    if command.stderr.strip() == 'No manual entry for ' + last_arg:
        return [help_command]

    split_cmd2 = command.script_parts
    split_cmd3 = split_cmd2[:]

    split_cmd2.insert(1, ' 2 ')
    split_cmd3.insert(1, ' 3 ')

    return [
        "".join(split_cmd3),
        "".join(split_cmd2),
        help_command,
    ]
BugsInPy/BugsInPy/temp/projects/thefuck/bug-10-fixed/thefuck/thefuck/rules/man.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-10-buggy/thefuck/thefuck/rules/man.py
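The man.py copies reorder the suggestions and add an early return: when stderr is exactly 'No manual entry for <arg>', only '<arg> --help' is offered. Both rely on a slightly surprising join: the inserted section token carries its own spaces, so joining with the empty string still yields a well-formed command:

parts = ['man', 'read']
parts2 = parts[:]
parts2.insert(1, ' 2 ')
print("".join(parts2))  # man 2 read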
thefuck-bug-23
from difflib import get_close_matches from functools import wraps import shelve from decorator import decorator import tempfile import os import pickle import re from pathlib import Path import six DEVNULL = open(os.devnull, 'w') if six.PY2: from pipes import quote else: from shlex import quote def memoize(fn): """Caches previous calls to the function.""" memo = {} @wraps(fn) def wrapper(*args, **kwargs): key = pickle.dumps((args, kwargs)) if key not in memo or memoize.disabled: memo[key] = fn(*args, **kwargs) return memo[key] return wrapper memoize.disabled = False @memoize def which(program): """Returns `program` path or `None`.""" def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def wrap_settings(params): """Adds default values to settings if it not presented. Usage: @wrap_settings({'apt': '/usr/bin/apt'}) def match(command, settings): print(settings.apt) """ def _wrap_settings(fn, command, settings): return fn(command, settings.update(**params)) return decorator(_wrap_settings) def get_closest(word, possibilities, n=3, cutoff=0.6, fallback_to_first=True): """Returns closest match or just first from possibilities.""" possibilities = list(possibilities) try: return get_close_matches(word, possibilities, n, cutoff)[0] except IndexError: if fallback_to_first: return possibilities[0] @memoize def get_all_executables(): from thefuck.shells import thefuck_alias, get_aliases def _safe(fn, fallback): try: return fn() except OSError: return fallback tf_alias = thefuck_alias() return [exe.name for path in os.environ.get('PATH', '').split(':') for exe in _safe(lambda: list(Path(path).iterdir()), []) if not _safe(exe.is_dir, True)] + [ alias for alias in get_aliases() if alias != tf_alias] def replace_argument(script, from_, to): """Replaces command line argument.""" replaced_in_the_end = re.sub(u' {}$'.format(from_), u' {}'.format(to), script, count=1) if replaced_in_the_end != script: return replaced_in_the_end else: return script.replace( u' {} '.format(from_), u' {} '.format(to), 1) @decorator def eager(fn, *args, **kwargs): return list(fn(*args, **kwargs)) @eager def get_all_matched_commands(stderr, separator='Did you mean'): should_yield = False for line in stderr.split('\n'): if separator in line: should_yield = True elif should_yield and line: yield line.strip() def replace_command(command, broken, matched): """Helper for *_no_command rules.""" new_cmds = get_close_matches(broken, matched, cutoff=0.1) return [replace_argument(command.script, broken, new_cmd.strip()) for new_cmd in new_cmds] @memoize def is_app(command, *app_names): """Returns `True` if command is call to one of passed app names.""" for name in app_names: if command.script == name \ or command.script.startswith(u'{} '.format(name)): return True return False def for_app(*app_names): """Specifies that matching script is for on of app names.""" def _for_app(fn, command, settings): if is_app(command, *app_names): return fn(command, settings) else: return False return decorator(_for_app) def cache(*depends_on): """Caches function result in temporary file. Cache will be expired when modification date of files from `depends_on` will be changed. Function wrapped in `cache` should be arguments agnostic. 
""" def _get_mtime(name): path = os.path.join(os.path.expanduser('~'), name) try: return str(os.path.getmtime(path)) except OSError: return '0' @decorator def _cache(fn, *args, **kwargs): if cache.disabled: return fn(*args, **kwargs) cache_path = os.path.join(tempfile.gettempdir(), '.thefuck-cache') key = '{}.{}'.format(fn.__module__, repr(fn).split('at')[0]) etag = '.'.join(_get_mtime(name) for name in depends_on) with shelve.open(cache_path) as db: if db.get(key, {}).get('etag') == etag: return db[key]['value'] else: value = fn(*args, **kwargs) db[key] = {'etag': etag, 'value': value} return value return _cache cache.disabled = False from difflib import get_close_matches from functools import wraps import shelve from decorator import decorator from contextlib import closing import tempfile import os import pickle import re from pathlib import Path import six DEVNULL = open(os.devnull, 'w') if six.PY2: from pipes import quote else: from shlex import quote def memoize(fn): """Caches previous calls to the function.""" memo = {} @wraps(fn) def wrapper(*args, **kwargs): key = pickle.dumps((args, kwargs)) if key not in memo or memoize.disabled: memo[key] = fn(*args, **kwargs) return memo[key] return wrapper memoize.disabled = False @memoize def which(program): """Returns `program` path or `None`.""" def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def wrap_settings(params): """Adds default values to settings if it not presented. Usage: @wrap_settings({'apt': '/usr/bin/apt'}) def match(command, settings): print(settings.apt) """ def _wrap_settings(fn, command, settings): return fn(command, settings.update(**params)) return decorator(_wrap_settings) def get_closest(word, possibilities, n=3, cutoff=0.6, fallback_to_first=True): """Returns closest match or just first from possibilities.""" possibilities = list(possibilities) try: return get_close_matches(word, possibilities, n, cutoff)[0] except IndexError: if fallback_to_first: return possibilities[0] @memoize def get_all_executables(): from thefuck.shells import thefuck_alias, get_aliases def _safe(fn, fallback): try: return fn() except OSError: return fallback tf_alias = thefuck_alias() return [exe.name for path in os.environ.get('PATH', '').split(':') for exe in _safe(lambda: list(Path(path).iterdir()), []) if not _safe(exe.is_dir, True)] + [ alias for alias in get_aliases() if alias != tf_alias] def replace_argument(script, from_, to): """Replaces command line argument.""" replaced_in_the_end = re.sub(u' {}$'.format(from_), u' {}'.format(to), script, count=1) if replaced_in_the_end != script: return replaced_in_the_end else: return script.replace( u' {} '.format(from_), u' {} '.format(to), 1) @decorator def eager(fn, *args, **kwargs): return list(fn(*args, **kwargs)) @eager def get_all_matched_commands(stderr, separator='Did you mean'): should_yield = False for line in stderr.split('\n'): if separator in line: should_yield = True elif should_yield and line: yield line.strip() def replace_command(command, broken, matched): """Helper for *_no_command rules.""" new_cmds = get_close_matches(broken, matched, cutoff=0.1) return [replace_argument(command.script, broken, new_cmd.strip()) for new_cmd in new_cmds] @memoize def is_app(command, *app_names): """Returns `True` if 
command is call to one of passed app names.""" for name in app_names: if command.script == name \ or command.script.startswith(u'{} '.format(name)): return True return False def for_app(*app_names): """Specifies that matching script is for on of app names.""" def _for_app(fn, command, settings): if is_app(command, *app_names): return fn(command, settings) else: return False return decorator(_for_app) def cache(*depends_on): """Caches function result in temporary file. Cache will be expired when modification date of files from `depends_on` will be changed. Function wrapped in `cache` should be arguments agnostic. """ def _get_mtime(name): path = os.path.join(os.path.expanduser('~'), name) try: return str(os.path.getmtime(path)) except OSError: return '0' @decorator def _cache(fn, *args, **kwargs): if cache.disabled: return fn(*args, **kwargs) cache_path = os.path.join(tempfile.gettempdir(), '.thefuck-cache') # A bit obscure, but simplest way to generate unique key for # functions and methods in python 2 and 3: key = '{}.{}'.format(fn.__module__, repr(fn).split('at')[0]) etag = '.'.join(_get_mtime(name) for name in depends_on) with closing(shelve.open(cache_path)) as db: if db.get(key, {}).get('etag') == etag: return db[key]['value'] else: value = fn(*args, **kwargs) db[key] = {'etag': etag, 'value': value} return value return _cache cache.disabled = False
BugsInPy/BugsInPy/temp/projects/thefuck/bug-23-fixed/thefuck/thefuck/utils.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-23-buggy/thefuck/thefuck/utils.py
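The utils.py delta in this row wraps shelve.open in contextlib.closing: Shelf only gained context-manager support in Python 3.4, so `with shelve.open(...)` breaks on Python 2 while closing() works everywhere. A minimal demonstration:

import os
import shelve
import tempfile
from contextlib import closing

path = os.path.join(tempfile.gettempdir(), 'demo-cache')
with closing(shelve.open(path)) as db:  # portable across Python 2 and 3
    db['etag'] = '123'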
thefuck-bug-1
import re

from thefuck.utils import replace_argument, for_app
from thefuck.specific.sudo import sudo_support


@sudo_support
@for_app('pip', 'pip2', 'pip3')
def match(command):
    return ('pip' in command.script and
            'unknown command' in command.output and
            'maybe you meant' in command.output)


def get_new_command(command):
    broken_cmd = re.findall(r'ERROR: unknown command \"([a-z]+)\"',
                            command.output)[0]
    new_cmd = re.findall(r'maybe you meant \"([a-z]+)\"', command.output)[0]
    return replace_argument(command.script, broken_cmd, new_cmd)


import re

from thefuck.utils import replace_argument, for_app
from thefuck.specific.sudo import sudo_support


@sudo_support
@for_app('pip', 'pip2', 'pip3')
def match(command):
    return ('pip' in command.script and
            'unknown command' in command.output and
            'maybe you meant' in command.output)


def get_new_command(command):
    broken_cmd = re.findall(r'ERROR: unknown command "([^"]+)"',
                            command.output)[0]
    new_cmd = re.findall(r'maybe you meant "([^"]+)"', command.output)[0]
    return replace_argument(command.script, broken_cmd, new_cmd)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-1-fixed/thefuck/thefuck/rules/pip_unknown_command.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-1-buggy/thefuck/thefuck/rules/pip_unknown_command.py
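pip_unknown_command.py widens the capture groups from [a-z]+ to [^"]+, so subcommands containing hyphens or digits are still extracted from pip's error message:

import re

output = 'ERROR: unknown command "instatll" - maybe you meant "install"'
print(re.findall(r'ERROR: unknown command "([^"]+)"', output)[0])  # instatll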
thefuck-bug-7
from thefuck.utils import replace_argument, for_app


@for_app('php')
def match(command):
    return "php -s" in command.script


def get_new_command(command):
    return replace_argument(command.script, "-s", "-S")


requires_output = False


from thefuck.utils import replace_argument, for_app


@for_app('php')
def match(command):
    return " -s " in command.script


def get_new_command(command):
    return replace_argument(command.script, "-s", "-S")
BugsInPy/BugsInPy/temp/projects/thefuck/bug-7-fixed/thefuck/thefuck/rules/php_s.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-7-buggy/thefuck/thefuck/rules/php_s.py
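php_s.py: matching the literal substring "php -s" misses invocations where other options precede -s, while " -s " (with surrounding spaces) catches the flag anywhere in the line; the requires_output flag also appears in only one copy. Quick check:

script = 'php -t pub -s 0.0.0.0:8080'
print('php -s' in script)  # False
print(' -s ' in script)    # True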
thefuck-bug-18
patterns = ['permission denied', 'EACCES', 'pkg: Insufficient privileges',
            'you cannot perform this operation unless you are root',
            'non-root users cannot', 'Operation not permitted',
            'root privilege', 'This command has to be run under the root user.',
            'This operation requires root.',
            'requested operation requires superuser privilege',
            'must be run as root', 'must run as root', 'must be superuser',
            'must be root', 'need to be root', 'need root', 'only root can ',
            'You don\'t have access to the history DB.',
            'authentication is required', 'eDSPermissionError']


def match(command):
    for pattern in patterns:
        if pattern.lower() in command.stderr.lower()\
                or pattern.lower() in command.stdout.lower():
            return True
    return False


def get_new_command(command):
    if '>' in command.script:
        return u'sudo sh -c "{}"'.format(command.script.replace('"', '\\"'))
    else:
        return u'sudo {}'.format(command.script)


patterns = ['permission denied', 'EACCES', 'pkg: Insufficient privileges',
            'you cannot perform this operation unless you are root',
            'non-root users cannot', 'Operation not permitted',
            'root privilege', 'This command has to be run under the root user.',
            'This operation requires root.',
            'requested operation requires superuser privilege',
            'must be run as root', 'must run as root', 'must be superuser',
            'must be root', 'need to be root', 'need root', 'only root can ',
            'You don\'t have access to the history DB.',
            'authentication is required', 'eDSPermissionError']


def match(command):
    if command.script_parts and command.script_parts[0] == 'sudo':
        return False

    for pattern in patterns:
        if pattern.lower() in command.stderr.lower()\
                or pattern.lower() in command.stdout.lower():
            return True
    return False


def get_new_command(command):
    if '>' in command.script:
        return u'sudo sh -c "{}"'.format(command.script.replace('"', '\\"'))
    else:
        return u'sudo {}'.format(command.script)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-18-fixed/thefuck/thefuck/rules/sudo.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-18-buggy/thefuck/thefuck/rules/sudo.py
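sudo.py gains an early return so the rule never re-wraps a command that already starts with sudo, avoiding suggestions like `sudo sudo ...`:

def already_sudo(script_parts):
    return bool(script_parts) and script_parts[0] == 'sudo'

print(already_sudo(['sudo', 'ls']))  # True -> rule declines to fire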
thefuck-bug-13
import re

from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.utils import eager


@git_support
def match(command):
    return ('branch' in command.script
            and "fatal: A branch named '" in command.stderr
            and " already exists." in command.stderr)


@git_support
@eager
def get_new_command(command):
    branch_name = re.findall(
        r"fatal: A branch named '([^']*)' already exists.", command.stderr)[0]
    new_command_templates = [['git branch -d {0}', 'git branch {0}'],
                             ['git branch -D {0}', 'git branch {0}'],
                             ['git checkout {0}']]
    for new_command_template in new_command_templates:
        yield shell.and_(*new_command_template).format(branch_name)


import re

from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.utils import eager


@git_support
def match(command):
    return ("fatal: A branch named '" in command.stderr
            and " already exists." in command.stderr)


@git_support
@eager
def get_new_command(command):
    branch_name = re.findall(
        r"fatal: A branch named '([^']*)' already exists.", command.stderr)[0]
    new_command_templates = [['git branch -d {0}', 'git branch {0}'],
                             ['git branch -d {0}', 'git checkout -b {0}'],
                             ['git branch -D {0}', 'git branch {0}'],
                             ['git branch -D {0}', 'git checkout -b {0}'],
                             ['git checkout {0}']]
    for new_command_template in new_command_templates:
        yield shell.and_(*new_command_template).format(branch_name)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-13-fixed/thefuck/thefuck/rules/git_branch_exists.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-13-buggy/thefuck/thefuck/rules/git_branch_exists.py
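This second git_branch_exists pair (compare thefuck-bug-6 above) drops the `'branch' in command.script` requirement, so failures from `git checkout -b <name>` also trigger the rule, and grows the template list from three recovery sequences to five:

stderr = "fatal: A branch named 'maint' already exists."
script = 'git checkout -b maint'
print('branch' in script)                    # False -- the narrow guard misses checkout -b
print("fatal: A branch named '" in stderr)   # True  -- the stderr-only guard still fires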
thefuck-bug-20
import os
import zipfile

from thefuck.utils import for_app


def _is_bad_zip(file):
    with zipfile.ZipFile(file, 'r') as archive:
        return len(archive.namelist()) > 1


def _zip_file(command):
    # unzip works that way:
    # unzip [-flags] file[.zip] [file(s) ...] [-x file(s) ...]
    #                ^          ^ files to unzip from the archive
    #                archive to unzip
    for c in command.script.split()[1:]:
        if not c.startswith('-'):
            if c.endswith('.zip'):
                return c
            else:
                return '{}.zip'.format(c)


@for_app('unzip')
def match(command):
    return ('-d' not in command.script
            and _is_bad_zip(_zip_file(command)))


def get_new_command(command):
    return '{} -d {}'.format(command.script, _zip_file(command)[:-4])


def side_effect(old_cmd, command):
    with zipfile.ZipFile(_zip_file(old_cmd), 'r') as archive:
        for file in archive.namelist():
            try:
                os.remove(file)
            except OSError:
                # does not try to remove directories as we cannot know if they
                # already existed before
                pass


requires_output = False


import os
import zipfile

from thefuck.utils import for_app
from thefuck.shells import quote


def _is_bad_zip(file):
    with zipfile.ZipFile(file, 'r') as archive:
        return len(archive.namelist()) > 1


def _zip_file(command):
    # unzip works that way:
    # unzip [-flags] file[.zip] [file(s) ...] [-x file(s) ...]
    #                ^          ^ files to unzip from the archive
    #                archive to unzip
    for c in command.split_script[1:]:
        if not c.startswith('-'):
            if c.endswith('.zip'):
                return c
            else:
                return '{}.zip'.format(c)


@for_app('unzip')
def match(command):
    return ('-d' not in command.script
            and _is_bad_zip(_zip_file(command)))


def get_new_command(command):
    return '{} -d {}'.format(command.script, quote(_zip_file(command)[:-4]))


def side_effect(old_cmd, command):
    with zipfile.ZipFile(_zip_file(old_cmd), 'r') as archive:
        for file in archive.namelist():
            try:
                os.remove(file)
            except OSError:
                # does not try to remove directories as we cannot know if they
                # already existed before
                pass


requires_output = False
BugsInPy/BugsInPy/temp/projects/thefuck/bug-20-fixed/thefuck/thefuck/rules/dirty_unzip.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-20-buggy/thefuck/thefuck/rules/dirty_unzip.py
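# The fix above switches from command.script.split() to the tokenized
# command.split_script and quotes the extraction directory, so archive
# names containing spaces survive. A hedged standalone sketch using the
# standard library's shlex as a stand-in for thefuck's tokenizer (an
# assumption; the project uses its own shell abstraction):
import shlex

script = "unzip 'foo bar.zip'"
print(script.split())       # ['unzip', "'foo", "bar.zip'"] -- broken token
print(shlex.split(script))  # ['unzip', 'foo bar.zip']      -- whole archive name
print(shlex.quote('foo bar'))  # "'foo bar'" -- why the -d target needs quoting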
thefuck-bug-2
import atexit
import os
import pickle
import re
import shelve
import sys

import six
from decorator import decorator
from difflib import get_close_matches as difflib_get_close_matches
from functools import wraps

from .logs import warn, exception
from .conf import settings
from .system import Path

DEVNULL = open(os.devnull, 'w')

if six.PY2:
    import anydbm
    shelve_open_error = anydbm.error
else:
    import dbm
    shelve_open_error = dbm.error


def memoize(fn):
    """Caches previous calls to the function."""
    memo = {}

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not memoize.disabled:
            key = pickle.dumps((args, kwargs))
            if key not in memo:
                memo[key] = fn(*args, **kwargs)
            value = memo[key]
        else:
            # Memoize is disabled, call the function
            value = fn(*args, **kwargs)
        return value

    return wrapper


memoize.disabled = False


@memoize
def which(program):
    """Returns `program` path or `None`."""
    try:
        from shutil import which
        return which(program)
    except ImportError:
        def is_exe(fpath):
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        fpath, fname = os.path.split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file

        return None


def default_settings(params):
    """Adds default values to settings if it not presented.

    Usage:

        @default_settings({'apt': '/usr/bin/apt'})
        def match(command):
            print(settings.apt)

    """
    def _default_settings(fn, command):
        for k, w in params.items():
            settings.setdefault(k, w)
        return fn(command)
    return decorator(_default_settings)


def get_closest(word, possibilities, cutoff=0.6, fallback_to_first=True):
    """Returns closest match or just first from possibilities."""
    possibilities = list(possibilities)
    try:
        return difflib_get_close_matches(word, possibilities, 1, cutoff)[0]
    except IndexError:
        if fallback_to_first:
            return possibilities[0]


def get_close_matches(word, possibilities, n=None, cutoff=0.6):
    """Overrides `difflib.get_close_match` to controle argument `n`."""
    if n is None:
        n = settings.num_close_matches
    return difflib_get_close_matches(word, possibilities, n, cutoff)


@memoize
def get_all_executables():
    from thefuck.shells import shell

    def _safe(fn, fallback):
        try:
            return fn()
        except OSError:
            return fallback

    tf_alias = get_alias()
    tf_entry_points = ['thefuck', 'fuck']

    bins = [exe.name.decode('utf8') if six.PY2 else exe.name
            for path in os.environ.get('PATH', '').split(':')
            for exe in _safe(lambda: list(Path(path).iterdir()), [])
            if not _safe(exe.is_dir, True)
            and exe.name not in tf_entry_points]
    aliases = [alias.decode('utf8') if six.PY2 else alias
               for alias in shell.get_aliases() if alias != tf_alias]

    return bins + aliases


def replace_argument(script, from_, to):
    """Replaces command line argument."""
    replaced_in_the_end = re.sub(u' {}$'.format(re.escape(from_)),
                                 u' {}'.format(to), script, count=1)
    if replaced_in_the_end != script:
        return replaced_in_the_end
    else:
        return script.replace(
            u' {} '.format(from_), u' {} '.format(to), 1)


@decorator
def eager(fn, *args, **kwargs):
    return list(fn(*args, **kwargs))


@eager
def get_all_matched_commands(stderr, separator='Did you mean'):
    if not isinstance(separator, list):
        separator = [separator]
    should_yield = False
    for line in stderr.split('\n'):
        for sep in separator:
            if sep in line:
                should_yield = True
                break
        else:
            if should_yield and line:
                yield line.strip()


def replace_command(command, broken, matched):
    """Helper for *_no_command rules."""
    new_cmds = get_close_matches(broken, matched, cutoff=0.1)
    return [replace_argument(command.script, broken, new_cmd.strip())
            for new_cmd in new_cmds]


@memoize
def is_app(command, *app_names, **kwargs):
    """Returns `True` if command is call to one of passed app names."""
    at_least = kwargs.pop('at_least', 0)
    if kwargs:
        raise TypeError("got an unexpected keyword argument '{}'".format(kwargs.keys()))

    if len(command.script_parts) > at_least:
        return command.script_parts[0] in app_names

    return False


def for_app(*app_names, **kwargs):
    """Specifies that matching script is for on of app names."""
    def _for_app(fn, command):
        if is_app(command, *app_names, **kwargs):
            return fn(command)
        else:
            return False

    return decorator(_for_app)


class Cache(object):
    """Lazy read cache and save changes at exit."""

    def __init__(self):
        self._db = None

    def _init_db(self):
        try:
            self._setup_db()
        except Exception:
            exception("Unable to init cache", sys.exc_info())
            self._db = {}

    def _setup_db(self):
        cache_dir = self._get_cache_dir()
        cache_path = Path(cache_dir).joinpath('thefuck').as_posix()

        try:
            self._db = shelve.open(cache_path)
        except shelve_open_error + (ImportError,):
            # Caused when switching between Python versions
            warn("Removing possibly out-dated cache")
            os.remove(cache_path)
            self._db = shelve.open(cache_path)

        atexit.register(self._db.close)

    def _get_cache_dir(self):
        default_xdg_cache_dir = os.path.expanduser("~/.cache")
        cache_dir = os.getenv("XDG_CACHE_HOME", default_xdg_cache_dir)

        # Ensure the cache_path exists, Python 2 does not have the exist_ok
        # parameter
        try:
            os.makedirs(cache_dir)
        except OSError:
            if not os.path.isdir(cache_dir):
                raise

        return cache_dir

    def _get_mtime(self, path):
        try:
            return str(os.path.getmtime(path))
        except OSError:
            return '0'

    def _get_key(self, fn, depends_on, args, kwargs):
        parts = (fn.__module__, repr(fn).split('at')[0],
                 depends_on, args, kwargs)
        return str(pickle.dumps(parts))

    def get_value(self, fn, depends_on, args, kwargs):
        if self._db is None:
            self._init_db()

        depends_on = [Path(name).expanduser().absolute().as_posix()
                      for name in depends_on]
        key = self._get_key(fn, depends_on, args, kwargs)
        etag = '.'.join(self._get_mtime(path) for path in depends_on)

        if self._db.get(key, {}).get('etag') == etag:
            return self._db[key]['value']
        else:
            value = fn(*args, **kwargs)
            self._db[key] = {'etag': etag, 'value': value}
            return value


_cache = Cache()


def cache(*depends_on):
    """Caches function result in temporary file.

    Cache will be expired when modification date of files from `depends_on`
    will be changed.

    Only functions should be wrapped in `cache`, not methods.

    """
    def cache_decorator(fn):
        @memoize
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if cache.disabled:
                return fn(*args, **kwargs)
            else:
                return _cache.get_value(fn, depends_on, args, kwargs)

        return wrapper

    return cache_decorator


cache.disabled = False


def get_installation_info():
    import pkg_resources
    return pkg_resources.require('thefuck')[0]


def get_alias():
    return os.environ.get('TF_ALIAS', 'fuck')


@memoize
def get_valid_history_without_current(command):
    def _not_corrected(history, tf_alias):
        """Returns all lines from history except that comes before `fuck`."""
        previous = None
        for line in history:
            if previous is not None and line != tf_alias:
                yield previous
            previous = line
        if history:
            yield history[-1]

    from thefuck.shells import shell
    history = shell.get_history()
    tf_alias = get_alias()
    executables = set(get_all_executables())\
        .union(shell.get_builtin_commands())

    return [line for line in _not_corrected(history, tf_alias)
            if not line.startswith(tf_alias) and not line == command.script
            and line.split(' ')[0] in executables]


def format_raw_script(raw_script):
    """Creates single script from a list of script parts.

    :type raw_script: [basestring]
    :rtype: basestring

    """
    if six.PY2:
        script = ' '.join(arg.decode('utf-8') for arg in raw_script)
    else:
        script = ' '.join(raw_script)

    return script.strip()


import atexit
import os
import pickle
import re
import shelve
import sys

import six
from decorator import decorator
from difflib import get_close_matches as difflib_get_close_matches
from functools import wraps

from .logs import warn, exception
from .conf import settings
from .system import Path

DEVNULL = open(os.devnull, 'w')

if six.PY2:
    import anydbm
    shelve_open_error = anydbm.error
else:
    import dbm
    shelve_open_error = dbm.error


def memoize(fn):
    """Caches previous calls to the function."""
    memo = {}

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not memoize.disabled:
            key = pickle.dumps((args, kwargs))
            if key not in memo:
                memo[key] = fn(*args, **kwargs)
            value = memo[key]
        else:
            # Memoize is disabled, call the function
            value = fn(*args, **kwargs)
        return value

    return wrapper


memoize.disabled = False


@memoize
def which(program):
    """Returns `program` path or `None`."""
    try:
        from shutil import which
        return which(program)
    except ImportError:
        def is_exe(fpath):
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        fpath, fname = os.path.split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file

        return None


def default_settings(params):
    """Adds default values to settings if it not presented.

    Usage:

        @default_settings({'apt': '/usr/bin/apt'})
        def match(command):
            print(settings.apt)

    """
    def _default_settings(fn, command):
        for k, w in params.items():
            settings.setdefault(k, w)
        return fn(command)
    return decorator(_default_settings)


def get_closest(word, possibilities, cutoff=0.6, fallback_to_first=True):
    """Returns closest match or just first from possibilities."""
    possibilities = list(possibilities)
    try:
        return difflib_get_close_matches(word, possibilities, 1, cutoff)[0]
    except IndexError:
        if fallback_to_first:
            return possibilities[0]


def get_close_matches(word, possibilities, n=None, cutoff=0.6):
    """Overrides `difflib.get_close_match` to controle argument `n`."""
    if n is None:
        n = settings.num_close_matches
    return difflib_get_close_matches(word, possibilities, n, cutoff)


@memoize
def get_all_executables():
    from thefuck.shells import shell

    def _safe(fn, fallback):
        try:
            return fn()
        except OSError:
            return fallback

    tf_alias = get_alias()
    tf_entry_points = ['thefuck', 'fuck']

    bins = [exe.name.decode('utf8') if six.PY2 else exe.name
            for path in os.environ.get('PATH', '').split(os.pathsep)
            for exe in _safe(lambda: list(Path(path).iterdir()), [])
            if not _safe(exe.is_dir, True)
            and exe.name not in tf_entry_points]
    aliases = [alias.decode('utf8') if six.PY2 else alias
               for alias in shell.get_aliases() if alias != tf_alias]

    return bins + aliases


def replace_argument(script, from_, to):
    """Replaces command line argument."""
    replaced_in_the_end = re.sub(u' {}$'.format(re.escape(from_)),
                                 u' {}'.format(to), script, count=1)
    if replaced_in_the_end != script:
        return replaced_in_the_end
    else:
        return script.replace(
            u' {} '.format(from_), u' {} '.format(to), 1)


@decorator
def eager(fn, *args, **kwargs):
    return list(fn(*args, **kwargs))


@eager
def get_all_matched_commands(stderr, separator='Did you mean'):
    if not isinstance(separator, list):
        separator = [separator]
    should_yield = False
    for line in stderr.split('\n'):
        for sep in separator:
            if sep in line:
                should_yield = True
                break
        else:
            if should_yield and line:
                yield line.strip()


def replace_command(command, broken, matched):
    """Helper for *_no_command rules."""
    new_cmds = get_close_matches(broken, matched, cutoff=0.1)
    return [replace_argument(command.script, broken, new_cmd.strip())
            for new_cmd in new_cmds]


@memoize
def is_app(command, *app_names, **kwargs):
    """Returns `True` if command is call to one of passed app names."""
    at_least = kwargs.pop('at_least', 0)
    if kwargs:
        raise TypeError("got an unexpected keyword argument '{}'".format(kwargs.keys()))

    if len(command.script_parts) > at_least:
        return command.script_parts[0] in app_names

    return False


def for_app(*app_names, **kwargs):
    """Specifies that matching script is for on of app names."""
    def _for_app(fn, command):
        if is_app(command, *app_names, **kwargs):
            return fn(command)
        else:
            return False

    return decorator(_for_app)


class Cache(object):
    """Lazy read cache and save changes at exit."""

    def __init__(self):
        self._db = None

    def _init_db(self):
        try:
            self._setup_db()
        except Exception:
            exception("Unable to init cache", sys.exc_info())
            self._db = {}

    def _setup_db(self):
        cache_dir = self._get_cache_dir()
        cache_path = Path(cache_dir).joinpath('thefuck').as_posix()

        try:
            self._db = shelve.open(cache_path)
        except shelve_open_error + (ImportError,):
            # Caused when switching between Python versions
            warn("Removing possibly out-dated cache")
            os.remove(cache_path)
            self._db = shelve.open(cache_path)

        atexit.register(self._db.close)

    def _get_cache_dir(self):
        default_xdg_cache_dir = os.path.expanduser("~/.cache")
        cache_dir = os.getenv("XDG_CACHE_HOME", default_xdg_cache_dir)

        # Ensure the cache_path exists, Python 2 does not have the exist_ok
        # parameter
        try:
            os.makedirs(cache_dir)
        except OSError:
            if not os.path.isdir(cache_dir):
                raise

        return cache_dir

    def _get_mtime(self, path):
        try:
            return str(os.path.getmtime(path))
        except OSError:
            return '0'

    def _get_key(self, fn, depends_on, args, kwargs):
        parts = (fn.__module__, repr(fn).split('at')[0],
                 depends_on, args, kwargs)
        return str(pickle.dumps(parts))

    def get_value(self, fn, depends_on, args, kwargs):
        if self._db is None:
            self._init_db()

        depends_on = [Path(name).expanduser().absolute().as_posix()
                      for name in depends_on]
        key = self._get_key(fn, depends_on, args, kwargs)
        etag = '.'.join(self._get_mtime(path) for path in depends_on)

        if self._db.get(key, {}).get('etag') == etag:
            return self._db[key]['value']
        else:
            value = fn(*args, **kwargs)
            self._db[key] = {'etag': etag, 'value': value}
            return value


_cache = Cache()


def cache(*depends_on):
    """Caches function result in temporary file.

    Cache will be expired when modification date of files from `depends_on`
    will be changed.

    Only functions should be wrapped in `cache`, not methods.

    """
    def cache_decorator(fn):
        @memoize
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if cache.disabled:
                return fn(*args, **kwargs)
            else:
                return _cache.get_value(fn, depends_on, args, kwargs)

        return wrapper

    return cache_decorator


cache.disabled = False


def get_installation_info():
    import pkg_resources
    return pkg_resources.require('thefuck')[0]


def get_alias():
    return os.environ.get('TF_ALIAS', 'fuck')


@memoize
def get_valid_history_without_current(command):
    def _not_corrected(history, tf_alias):
        """Returns all lines from history except that comes before `fuck`."""
        previous = None
        for line in history:
            if previous is not None and line != tf_alias:
                yield previous
            previous = line
        if history:
            yield history[-1]

    from thefuck.shells import shell
    history = shell.get_history()
    tf_alias = get_alias()
    executables = set(get_all_executables())\
        .union(shell.get_builtin_commands())

    return [line for line in _not_corrected(history, tf_alias)
            if not line.startswith(tf_alias) and not line == command.script
            and line.split(' ')[0] in executables]


def format_raw_script(raw_script):
    """Creates single script from a list of script parts.

    :type raw_script: [basestring]
    :rtype: basestring

    """
    if six.PY2:
        script = ' '.join(arg.decode('utf-8') for arg in raw_script)
    else:
        script = ' '.join(raw_script)

    return script.strip()
BugsInPy/BugsInPy/temp/projects/thefuck/bug-2-fixed/thefuck/thefuck/utils.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-2-buggy/thefuck/thefuck/utils.py
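# The only difference in this pair is inside get_all_executables(): the fix
# splits PATH on os.pathsep instead of a literal ':'. A minimal sketch of
# why that matters (the Windows-style PATH string is an illustrative value):
import os

windows_style_path = r'C:\Python38;C:\Windows\System32'
print(windows_style_path.split(':'))  # ['C', '\\Python38;C', '\\Windows\\System32']
print(windows_style_path.split(';'))  # what splitting on os.pathsep yields on Windows
print(os.pathsep)                     # ':' on POSIX, ';' on Windows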
thefuck-bug-25
import re
from thefuck.utils import sudo_support


@sudo_support
def match(command, settings):
    return ('mkdir' in command.script
            and 'No such file or directory' in command.stderr)


@sudo_support
def get_new_command(command, settings):
    return re.sub('^mkdir (.*)', 'mkdir -p \\1', command.script)


import re
from thefuck.utils import sudo_support


@sudo_support
def match(command, settings):
    return ('mkdir' in command.script
            and 'No such file or directory' in command.stderr)


@sudo_support
def get_new_command(command, settings):
    return re.sub('\\bmkdir (.*)', 'mkdir -p \\1', command.script)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-25-fixed/thefuck/thefuck/rules/mkdir_p.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-25-buggy/thefuck/thefuck/rules/mkdir_p.py
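# The fix replaces the '^mkdir' anchor with a '\bmkdir' word boundary, so
# the rule also rewrites commands where mkdir is not the first word, such
# as 'hdfs dfs -mkdir'. A quick demonstration of the two patterns:
import re

for script in ('mkdir foo/bar', 'hdfs dfs -mkdir foo/bar'):
    print(re.sub('^mkdir (.*)', 'mkdir -p \\1', script))    # misses the second script
    print(re.sub('\\bmkdir (.*)', 'mkdir -p \\1', script))  # rewrites both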
thefuck-bug-4
from subprocess import Popen, PIPE
from time import time
import os
import sys

import six

from .. import logs
from ..conf import settings
from ..utils import DEVNULL, cache
from .generic import Generic


@cache('~/.config/fish/config.fish', '~/.config/fish/functions')
def _get_functions(overridden):
    proc = Popen(['fish', '-ic', 'functions'], stdout=PIPE, stderr=DEVNULL)
    functions = proc.stdout.read().decode('utf-8').strip().split('\n')
    return {func: func for func in functions if func not in overridden}


@cache('~/.config/fish/config.fish')
def _get_aliases(overridden):
    aliases = {}
    proc = Popen(['fish', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL)
    alias_out = proc.stdout.read().decode('utf-8').strip().split('\n')
    for alias in alias_out:
        name, value = alias.replace('alias ', '', 1).split(' ', 1)
        if name not in overridden:
            aliases[name] = value
    return aliases


class Fish(Generic):
    def _get_overridden_aliases(self):
        overridden = os.environ.get('THEFUCK_OVERRIDDEN_ALIASES',
                                    os.environ.get('TF_OVERRIDDEN_ALIASES', ''))
        default = {'cd', 'grep', 'ls', 'man', 'open'}
        for alias in overridden.split(','):
            default.add(alias.strip())
        return default

    def app_alias(self, alias_name):
        if settings.alter_history:
            alter_history = ('    builtin history delete --exact'
                             ' --case-sensitive -- $fucked_up_command\n'
                             '    builtin history merge ^ /dev/null\n')
        else:
            alter_history = ''
        # It is VERY important to have the variables declared WITHIN the alias
        return ('function {0} -d "Correct your previous console command"\n'
                '    set -l fucked_up_command $history[1]\n'
                '    env TF_SHELL=fish TF_ALIAS={0} PYTHONIOENCODING=utf-8'
                ' thefuck $fucked_up_command | read -l unfucked_command\n'
                '    if [ "$unfucked_command" != "" ]\n'
                '        eval $unfucked_command\n{1}'
                '    end\n'
                'end').format(alias_name, alter_history)

    def get_aliases(self):
        overridden = self._get_overridden_aliases()
        functions = _get_functions(overridden)
        raw_aliases = _get_aliases(overridden)
        functions.update(raw_aliases)
        return functions

    def _expand_aliases(self, command_script):
        aliases = self.get_aliases()
        binary = command_script.split(' ')[0]
        if binary in aliases and aliases[binary] != binary:
            return command_script.replace(binary, aliases[binary], 1)
        elif binary in aliases:
            return u'fish -ic "{}"'.format(command_script.replace('"', r'\"'))
        else:
            return command_script

    def _get_history_file_name(self):
        return os.path.expanduser('~/.config/fish/fish_history')

    def _get_history_line(self, command_script):
        return u'- cmd: {}\n  when: {}\n'.format(command_script, int(time()))

    def _script_from_history(self, line):
        if '- cmd: ' in line:
            return line.split('- cmd: ', 1)[1]
        else:
            return ''

    def and_(self, *commands):
        return u'; and '.join(commands)

    def or_(self, *commands):
        return u'; or '.join(commands)

    def how_to_configure(self):
        return self._create_shell_configuration(
            content=u"thefuck --alias | source",
            path='~/.config/fish/config.fish',
            reload='fish')

    def put_to_history(self, command):
        try:
            return self._put_to_history(command)
        except IOError:
            logs.exception("Can't update history", sys.exc_info())

    def _put_to_history(self, command_script):
        """Puts command script to shell history."""
        history_file_name = self._get_history_file_name()
        if os.path.isfile(history_file_name):
            with open(history_file_name, 'a') as history:
                entry = self._get_history_line(command_script)
                if six.PY2:
                    history.write(entry.encode('utf-8'))
                else:
                    history.write(entry)


from subprocess import Popen, PIPE
from time import time
import os
import sys

import six

from .. import logs
from ..conf import settings
from ..utils import DEVNULL, cache
from .generic import Generic


@cache('~/.config/fish/config.fish', '~/.config/fish/functions')
def _get_functions(overridden):
    proc = Popen(['fish', '-ic', 'functions'], stdout=PIPE, stderr=DEVNULL)
    functions = proc.stdout.read().decode('utf-8').strip().split('\n')
    return {func: func for func in functions if func not in overridden}


@cache('~/.config/fish/config.fish')
def _get_aliases(overridden):
    aliases = {}
    proc = Popen(['fish', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL)
    alias_out = proc.stdout.read().decode('utf-8').strip()
    if not alias_out:
        return aliases
    for alias in alias_out.split('\n'):
        for separator in (' ', '='):
            split_alias = alias.replace('alias ', '', 1).split(separator, 1)
            if len(split_alias) == 2:
                name, value = split_alias
                break
        else:
            continue
        if name not in overridden:
            aliases[name] = value
    return aliases


class Fish(Generic):
    def _get_overridden_aliases(self):
        overridden = os.environ.get('THEFUCK_OVERRIDDEN_ALIASES',
                                    os.environ.get('TF_OVERRIDDEN_ALIASES', ''))
        default = {'cd', 'grep', 'ls', 'man', 'open'}
        for alias in overridden.split(','):
            default.add(alias.strip())
        return default

    def app_alias(self, alias_name):
        if settings.alter_history:
            alter_history = ('    builtin history delete --exact'
                             ' --case-sensitive -- $fucked_up_command\n'
                             '    builtin history merge ^ /dev/null\n')
        else:
            alter_history = ''
        # It is VERY important to have the variables declared WITHIN the alias
        return ('function {0} -d "Correct your previous console command"\n'
                '    set -l fucked_up_command $history[1]\n'
                '    env TF_SHELL=fish TF_ALIAS={0} PYTHONIOENCODING=utf-8'
                ' thefuck $fucked_up_command | read -l unfucked_command\n'
                '    if [ "$unfucked_command" != "" ]\n'
                '        eval $unfucked_command\n{1}'
                '    end\n'
                'end').format(alias_name, alter_history)

    def get_aliases(self):
        overridden = self._get_overridden_aliases()
        functions = _get_functions(overridden)
        raw_aliases = _get_aliases(overridden)
        functions.update(raw_aliases)
        return functions

    def _expand_aliases(self, command_script):
        aliases = self.get_aliases()
        binary = command_script.split(' ')[0]
        if binary in aliases and aliases[binary] != binary:
            return command_script.replace(binary, aliases[binary], 1)
        elif binary in aliases:
            return u'fish -ic "{}"'.format(command_script.replace('"', r'\"'))
        else:
            return command_script

    def _get_history_file_name(self):
        return os.path.expanduser('~/.config/fish/fish_history')

    def _get_history_line(self, command_script):
        return u'- cmd: {}\n  when: {}\n'.format(command_script, int(time()))

    def _script_from_history(self, line):
        if '- cmd: ' in line:
            return line.split('- cmd: ', 1)[1]
        else:
            return ''

    def and_(self, *commands):
        return u'; and '.join(commands)

    def or_(self, *commands):
        return u'; or '.join(commands)

    def how_to_configure(self):
        return self._create_shell_configuration(
            content=u"thefuck --alias | source",
            path='~/.config/fish/config.fish',
            reload='fish')

    def put_to_history(self, command):
        try:
            return self._put_to_history(command)
        except IOError:
            logs.exception("Can't update history", sys.exc_info())

    def _put_to_history(self, command_script):
        """Puts command script to shell history."""
        history_file_name = self._get_history_file_name()
        if os.path.isfile(history_file_name):
            with open(history_file_name, 'a') as history:
                entry = self._get_history_line(command_script)
                if six.PY2:
                    history.write(entry.encode('utf-8'))
                else:
                    history.write(entry)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-4-fixed/thefuck/thefuck/shells/fish.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-4-buggy/thefuck/thefuck/shells/fish.py
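# The fix hardens _get_aliases(): it tolerates empty output from
# `fish -ic alias` and accepts both 'alias name value' and 'alias name=value'
# forms. A hedged, self-contained sketch of that parsing logic, with the
# subprocess and cache plumbing stripped out (function name assumed):
def parse_aliases(alias_out, overridden=()):
    aliases = {}
    if not alias_out.strip():
        return aliases  # no aliases defined: the buggy version choked here
    for alias in alias_out.strip().split('\n'):
        for separator in (' ', '='):  # fish has emitted both forms
            split_alias = alias.replace('alias ', '', 1).split(separator, 1)
            if len(split_alias) == 2:
                name, value = split_alias
                break
        else:
            continue  # unparseable line: skip instead of crashing
        if name not in overridden:
            aliases[name] = value
    return aliases

print(parse_aliases('alias ll ls -lah\nalias g=git'))  # {'ll': 'ls -lah', 'g': 'git'}
print(parse_aliases(''))                               # {}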
thefuck-bug-30
import re
import os
from thefuck.utils import memoize
from thefuck import shells


patterns = (
    # js, node:
    '^    at {file}:{line}:{col}',
    # cargo:
    '^   {file}:{line}:{col}',
    # python, thefuck:
    '^  File "{file}", line {line}',
    # awk:
    '^awk: {file}:{line}:',
    # git
    '^fatal: bad config file line {line} in {file}',
    # llc:
    '^llc: {file}:{line}:{col}:',
    # lua:
    '^lua: {file}:{line}:',
    # fish:
    '^{file} \\(line {line}\\):',
    # bash, sh, ssh:
    '^{file}: line {line}: ',
    # ghc, make, ruby, zsh:
    '^{file}:{line}:',
    # cargo, clang, gcc, go, rustc:
    '^{file}:{line}:{col}',
    # perl:
    'at {file} line {line}',
)
# for the sake of readability do not use named groups above


def _make_pattern(pattern):
    pattern = pattern.replace('{file}', '(?P<file>[^:\n]+)')
    pattern = pattern.replace('{line}', '(?P<line>[0-9]+)')
    pattern = pattern.replace('{col}', '(?P<col>[0-9]+)')
    return re.compile(pattern, re.MULTILINE)


patterns = [_make_pattern(p) for p in patterns]


@memoize
def _search(stderr):
    for pattern in patterns:
        m = re.search(pattern, stderr)
        if m:
            return m


def match(command, settings):
    return 'EDITOR' in os.environ and _search(command.stderr)


def get_new_command(command, settings):
    m = _search(command.stderr)

    # Note: there does not seem to be a standard for columns, so they are just
    # ignored for now
    return shells.and_('{} {} +{}'.format(os.environ['EDITOR'],
                                          m.group('file'),
                                          m.group('line')),
                       command.script)


import re
import os
from thefuck.utils import memoize
from thefuck import shells


patterns = (
    # js, node:
    '^    at {file}:{line}:{col}',
    # cargo:
    '^   {file}:{line}:{col}',
    # python, thefuck:
    '^  File "{file}", line {line}',
    # awk:
    '^awk: {file}:{line}:',
    # git
    '^fatal: bad config file line {line} in {file}',
    # llc:
    '^llc: {file}:{line}:{col}:',
    # lua:
    '^lua: {file}:{line}:',
    # fish:
    '^{file} \\(line {line}\\):',
    # bash, sh, ssh:
    '^{file}: line {line}: ',
    # ghc, make, ruby, zsh:
    '^{file}:{line}:',
    # cargo, clang, gcc, go, rustc:
    '^{file}:{line}:{col}',
    # perl:
    'at {file} line {line}',
)
# for the sake of readability do not use named groups above


def _make_pattern(pattern):
    pattern = pattern.replace('{file}', '(?P<file>[^:\n]+)')
    pattern = pattern.replace('{line}', '(?P<line>[0-9]+)')
    pattern = pattern.replace('{col}', '(?P<col>[0-9]+)')
    return re.compile(pattern, re.MULTILINE)


patterns = [_make_pattern(p) for p in patterns]


@memoize
def _search(stderr):
    for pattern in patterns:
        m = re.search(pattern, stderr)
        if m:
            return m


def match(command, settings):
    if 'EDITOR' not in os.environ:
        return False

    m = _search(command.stderr)
    return m and os.path.isfile(m.group('file'))


def get_new_command(command, settings):
    m = _search(command.stderr)

    # Note: there does not seem to be a standard for columns, so they are just
    # ignored for now
    return shells.and_('{} {} +{}'.format(os.environ['EDITOR'],
                                          m.group('file'),
                                          m.group('line')),
                       command.script)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-30-fixed/thefuck/thefuck/rules/fix_file.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-30-buggy/thefuck/thefuck/rules/fix_file.py
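# The fixed match() above adds an os.path.isfile() guard, so the rule only
# fires when the path extracted from stderr is a real file, not compiler
# noise that merely looks like one. A minimal, hedged sketch of that guard
# (the EDITOR default is an assumption for the demo):
import os
import tempfile

def should_fix(file_path):
    # mirror the fixed logic: an editor is configured AND the file exists
    return 'EDITOR' in os.environ and os.path.isfile(file_path)

os.environ.setdefault('EDITOR', 'vim')
with tempfile.NamedTemporaryFile(suffix='.py') as tmp:
    print(should_fix(tmp.name))        # True: a real file worth opening
print(should_fix('/no/such/file.py'))  # False: not a path on disk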
thefuck-bug-8
import subprocess
import re
from thefuck.specific.sudo import sudo_support
from thefuck.utils import for_app, replace_command
from thefuck.specific.dnf import dnf_available


regex = re.compile(r'No such command: (.*)\.')


@for_app('dnf')
@sudo_support
def match(command):
    return 'no such command' in command.output.lower()


def _parse_operations(help_text_lines):
    # The regex has to be a bytes-style regex since reading from a file
    # like stdin returns a bytes-style object and a string-style regex
    # wouldn't work.
    operation_regex = re.compile(b'^([a-z-]+) +', re.MULTILINE)
    return operation_regex.findall(help_text_lines)


def _get_operations():
    proc = subprocess.Popen(["dnf", '--help'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    lines = proc.stdout.read()
    return _parse_operations(lines)


@sudo_support
def get_new_command(command):
    misspelled_command = regex.findall(command.output)[0]
    return replace_command(command, misspelled_command, _get_operations())


enabled_by_default = dnf_available


import subprocess
import re
from thefuck.specific.sudo import sudo_support
from thefuck.utils import for_app, replace_command
from thefuck.specific.dnf import dnf_available


regex = re.compile(r'No such command: (.*)\.')


@for_app('dnf')
@sudo_support
def match(command):
    return 'no such command' in command.output.lower()


def _parse_operations(help_text_lines):
    operation_regex = re.compile(r'^([a-z-]+) +', re.MULTILINE)
    return operation_regex.findall(help_text_lines)


def _get_operations():
    proc = subprocess.Popen(["dnf", '--help'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    lines = proc.stdout.read().decode("utf-8")
    return _parse_operations(lines)


@sudo_support
def get_new_command(command):
    misspelled_command = regex.findall(command.output)[0]
    return replace_command(command, misspelled_command, _get_operations())


enabled_by_default = dnf_available
BugsInPy/BugsInPy/temp/projects/thefuck/bug-8-fixed/thefuck/thefuck/rules/dnf_no_such_command.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-8-buggy/thefuck/thefuck/rules/dnf_no_such_command.py
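# The two copies differ in whether dnf's help output is matched as bytes or
# decoded to str first: a subprocess pipe yields bytes, and the pattern type
# must match the data type. A quick demonstration (the help text is a toy
# value):
import re

raw = b'clean  remove cached data\ndistro-sync  synchronize installed packages\n'
print(re.compile(b'^([a-z-]+) +', re.MULTILINE).findall(raw))                  # [b'clean', b'distro-sync']
print(re.compile(r'^([a-z-]+) +', re.MULTILINE).findall(raw.decode('utf-8')))  # ['clean', 'distro-sync']
# mixing a str pattern with bytes data raises TypeError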
thefuck-bug-27
# Opens URL's in the default web browser
#
# Example:
# > open github.com
# The file ~/github.com does not exist.
# Perhaps you meant 'http://github.com'?
#


def match(command, settings):
    return (command.script.startswith(('open', 'xdg-open', 'gnome-open', 'kde-open'))
            and (
                '.com' in command.script
                or '.net' in command.script
                or '.org' in command.script
                or '.ly' in command.script
                or '.io' in command.script
                or '.se' in command.script
                or '.edu' in command.script
                or '.info' in command.script
                or '.me' in command.script
                or 'www.' in command.script))


def get_new_command(command, settings):
    return 'open http://' + command.script[5:]


# Opens URL's in the default web browser
#
# Example:
# > open github.com
# The file ~/github.com does not exist.
# Perhaps you meant 'http://github.com'?
#


def match(command, settings):
    return (command.script.startswith(('open', 'xdg-open', 'gnome-open', 'kde-open'))
            and (
                '.com' in command.script
                or '.net' in command.script
                or '.org' in command.script
                or '.ly' in command.script
                or '.io' in command.script
                or '.se' in command.script
                or '.edu' in command.script
                or '.info' in command.script
                or '.me' in command.script
                or 'www.' in command.script))


def get_new_command(command, settings):
    return command.script.replace('open ', 'open http://')
BugsInPy/BugsInPy/temp/projects/thefuck/bug-27-fixed/thefuck/thefuck/rules/open.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-27-buggy/thefuck/thefuck/rules/open.py
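# The fix swaps hard-coded slicing for a targeted replace: script[5:] only
# works when the binary is literally 'open', while replace('open ',
# 'open http://') also handles xdg-open, gnome-open, and kde-open. A quick
# demonstration:
for script in ('open github.com', 'xdg-open github.com'):
    print('open http://' + script[5:])                 # buggy: mangles xdg-open
    print(script.replace('open ', 'open http://'))     # fixed: keeps the binary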
thefuck-bug-22
from collections import namedtuple
from traceback import format_stack

from .logs import debug

Command = namedtuple('Command', ('script', 'stdout', 'stderr'))

Rule = namedtuple('Rule', ('name', 'match', 'get_new_command',
                           'enabled_by_default', 'side_effect',
                           'priority', 'requires_output'))


class CorrectedCommand(object):
    def __init__(self, script, side_effect, priority):
        self.script = script
        self.side_effect = side_effect
        self.priority = priority

    def __eq__(self, other):
        """Ignores `priority` field."""
        if isinstance(other, CorrectedCommand):
            return (other.script, other.side_effect) ==\
                   (self.script, self.side_effect)
        else:
            return False

    def __hash__(self):
        return (self.script, self.side_effect).__hash__()

    def __repr__(self):
        return 'CorrectedCommand(script={}, side_effect={}, priority={})'.format(
            self.script, self.side_effect, self.priority)


class RulesNamesList(list):
    """Wrapper a top of list for storing rules names."""

    def __contains__(self, item):
        return super(RulesNamesList, self).__contains__(item.name)


class Settings(dict):
    def __getattr__(self, item):
        return self.get(item)

    def update(self, **kwargs):
        """
        Returns new settings with values from `kwargs` for unset settings.
        """
        conf = dict(kwargs)
        conf.update(self)
        return Settings(conf)


class SortedCorrectedCommandsSequence(object):
    """List-like collection/wrapper around generator, that:

    - immediately gives access to the first commands through [];
    - realises generator and sorts commands on first access to other
      commands through [], or when len called.

    """

    def __init__(self, commands, settings):
        self._settings = settings
        self._commands = commands
        self._cached = self._realise_first()
        self._realised = False

    def _realise_first(self):
        try:
            return [next(self._commands)]
        except StopIteration:
            return []

    def _remove_duplicates(self, corrected_commands):
        """Removes low-priority duplicates."""
        commands = {command
                    for command in sorted(corrected_commands,
                                          key=lambda command: -command.priority)
                    if command.script != self._cached[0]}
        return commands

    def _realise(self):
        """Realises generator, removes duplicates and sorts commands."""
        commands = self._remove_duplicates(self._commands)
        self._cached = [self._cached[0]] + sorted(
            commands, key=lambda corrected_command: corrected_command.priority)
        self._realised = True
        debug('SortedCommandsSequence was realised with: {}, after: {}'.format(
            self._cached, '\n'.join(format_stack())), self._settings)

    def __getitem__(self, item):
        if item != 0 and not self._realised:
            self._realise()
        return self._cached[item]

    def __bool__(self):
        return bool(self._cached)

    def __len__(self):
        if not self._realised:
            self._realise()
        return len(self._cached)

    def __iter__(self):
        if not self._realised:
            self._realise()
        return iter(self._cached)


from collections import namedtuple
from traceback import format_stack

from .logs import debug

Command = namedtuple('Command', ('script', 'stdout', 'stderr'))

Rule = namedtuple('Rule', ('name', 'match', 'get_new_command',
                           'enabled_by_default', 'side_effect',
                           'priority', 'requires_output'))


class CorrectedCommand(object):
    def __init__(self, script, side_effect, priority):
        self.script = script
        self.side_effect = side_effect
        self.priority = priority

    def __eq__(self, other):
        """Ignores `priority` field."""
        if isinstance(other, CorrectedCommand):
            return (other.script, other.side_effect) ==\
                   (self.script, self.side_effect)
        else:
            return False

    def __hash__(self):
        return (self.script, self.side_effect).__hash__()

    def __repr__(self):
        return 'CorrectedCommand(script={}, side_effect={}, priority={})'.format(
            self.script, self.side_effect, self.priority)


class RulesNamesList(list):
    """Wrapper a top of list for storing rules names."""

    def __contains__(self, item):
        return super(RulesNamesList, self).__contains__(item.name)


class Settings(dict):
    def __getattr__(self, item):
        return self.get(item)

    def update(self, **kwargs):
        """
        Returns new settings with values from `kwargs` for unset settings.
        """
        conf = dict(kwargs)
        conf.update(self)
        return Settings(conf)


class SortedCorrectedCommandsSequence(object):
    """List-like collection/wrapper around generator, that:

    - immediately gives access to the first commands through [];
    - realises generator and sorts commands on first access to other
      commands through [], or when len called.

    """

    def __init__(self, commands, settings):
        self._settings = settings
        self._commands = commands
        self._cached = self._realise_first()
        self._realised = False

    def _realise_first(self):
        try:
            return [next(self._commands)]
        except StopIteration:
            return []

    def _remove_duplicates(self, corrected_commands):
        """Removes low-priority duplicates."""
        commands = {command
                    for command in sorted(corrected_commands,
                                          key=lambda command: -command.priority)
                    if command.script != self._cached[0]}
        return commands

    def _realise(self):
        """Realises generator, removes duplicates and sorts commands."""
        if self._cached:
            commands = self._remove_duplicates(self._commands)
            self._cached = [self._cached[0]] + sorted(
                commands, key=lambda corrected_command: corrected_command.priority)
        self._realised = True
        debug('SortedCommandsSequence was realised with: {}, after: {}'.format(
            self._cached, '\n'.join(format_stack())), self._settings)

    def __getitem__(self, item):
        if item != 0 and not self._realised:
            self._realise()
        return self._cached[item]

    def __bool__(self):
        return bool(self._cached)

    def __len__(self):
        if not self._realised:
            self._realise()
        return len(self._cached)

    def __iter__(self):
        if not self._realised:
            self._realise()
        return iter(self._cached)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-22-fixed/thefuck/thefuck/types.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-22-buggy/thefuck/thefuck/types.py
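# The fix guards _realise() with 'if self._cached:' so that an empty result
# sequence no longer hits self._cached[0] and raises IndexError. A minimal,
# hedged sketch of the guard, with the class machinery stripped away
# (function and variable names are assumptions):
def realise(cached, generated):
    # mirrors the fixed _realise(): only dedupe/sort when a first command
    # was actually cached
    if cached:
        rest = {c for c in generated if c != cached[0]}
        return cached[:1] + sorted(rest)
    return cached

print(realise([], []))                  # [] -- the buggy version raised IndexError here
print(realise(['b'], ['a', 'b', 'c']))  # ['b', 'a', 'c']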
thefuck-bug-29
from collections import namedtuple

Command = namedtuple('Command', ('script', 'stdout', 'stderr'))

CorrectedCommand = namedtuple('CorrectedCommand', ('script', 'side_effect', 'priority'))

Rule = namedtuple('Rule', ('name', 'match', 'get_new_command',
                           'enabled_by_default', 'side_effect',
                           'priority', 'requires_output'))


class RulesNamesList(list):
    """Wrapper a top of list for storing rules names."""

    def __contains__(self, item):
        return super(RulesNamesList, self).__contains__(item.name)


class Settings(dict):
    def __getattr__(self, item):
        return self.get(item)

    def update(self, **kwargs):
        """Returns new settings with new values from `kwargs`."""
        conf = dict(self)
        conf.update(kwargs)
        return Settings(conf)


from collections import namedtuple

Command = namedtuple('Command', ('script', 'stdout', 'stderr'))

CorrectedCommand = namedtuple('CorrectedCommand', ('script', 'side_effect', 'priority'))

Rule = namedtuple('Rule', ('name', 'match', 'get_new_command',
                           'enabled_by_default', 'side_effect',
                           'priority', 'requires_output'))


class RulesNamesList(list):
    """Wrapper a top of list for storing rules names."""

    def __contains__(self, item):
        return super(RulesNamesList, self).__contains__(item.name)


class Settings(dict):
    def __getattr__(self, item):
        return self.get(item)

    def update(self, **kwargs):
        """
        Returns new settings with values from `kwargs` for unset settings.
        """
        conf = dict(kwargs)
        conf.update(self)
        return Settings(conf)
BugsInPy/BugsInPy/temp/projects/thefuck/bug-29-fixed/thefuck/thefuck/types.py,BugsInPy/BugsInPy/temp/projects/thefuck/bug-29-buggy/thefuck/thefuck/types.py
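# The fix reverses the merge order in Settings.update(): values passed as
# kwargs become defaults, and anything the user already set wins. A
# standalone sketch of the two merge orders (plain dicts stand in for the
# Settings class; names are assumptions):
def update_fixed(settings, **kwargs):
    conf = dict(kwargs)
    conf.update(settings)  # existing settings take precedence
    return conf

def update_buggy(settings, **kwargs):
    conf = dict(settings)
    conf.update(kwargs)    # kwargs silently override user settings
    return conf

user_settings = {'rules': ['git_branch_exists']}
print(update_fixed(user_settings, rules=['defaults'], wait_command=3))
# {'rules': ['git_branch_exists'], 'wait_command': 3}
print(update_buggy(user_settings, rules=['defaults'], wait_command=3))
# {'rules': ['defaults'], 'wait_command': 3}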
luigi-bug-5
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ ============================================================ Using ``inherits`` and ``requires`` to ease parameter pain ============================================================ Most luigi plumbers will find themselves in an awkward task parameter situation at some point or another. Consider the following "parameter explosion" problem: .. code-block:: python class TaskA(luigi.ExternalTask): param_a = luigi.Parameter() def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) class TaskB(luigi.Task): param_b = luigi.Parameter() param_a = luigi.Parameter() def requires(self): return TaskA(param_a=self.param_a) class TaskC(luigi.Task): param_c = luigi.Parameter() param_b = luigi.Parameter() param_a = luigi.Parameter() def requires(self): return TaskB(param_b=self.param_b, param_a=self.param_a) In work flows requiring many tasks to be chained together in this manner, parameter handling can spiral out of control. Each downstream task becomes more burdensome than the last. Refactoring becomes more difficult. There are several ways one might try and avoid the problem. **Approach 1**: Parameters via command line or config instead of ``requires``. .. code-block:: python class TaskA(luigi.ExternalTask): param_a = luigi.Parameter() def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) class TaskB(luigi.Task): param_b = luigi.Parameter() def requires(self): return TaskA() class TaskC(luigi.Task): param_c = luigi.Parameter() def requires(self): return TaskB() Then run in the shell like so: .. code-block:: bash luigi --module my_tasks TaskC --param-c foo --TaskB-param-b bar --TaskA-param-a baz Repetitive parameters have been eliminated, but at the cost of making the job's command line interface slightly clunkier. Often this is a reasonable trade-off. But parameters can't always be refactored out every class. Downstream tasks might also need to use some of those parameters. For example, if ``TaskC`` needs to use ``param_a`` too, then ``param_a`` would still need to be repeated. **Approach 2**: Use a common parameter class .. code-block:: python class Params(luigi.Config): param_c = luigi.Parameter() param_b = luigi.Parameter() param_a = luigi.Parameter() class TaskA(Params, luigi.ExternalTask): def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) class TaskB(Params): def requires(self): return TaskA() class TaskB(Params): def requires(self): return TaskB() This looks great at first glance, but a couple of issues lurk. Now ``TaskA`` and ``TaskB`` have unnecessary significant parameters. Significant parameters help define the identity of a task. Identical tasks are prevented from running at the same time by the central planner. This helps preserve the idempotent and atomic nature of luigi tasks. Unnecessary significant task parameters confuse a task's identity. 
Under the right circumstances, task identity confusion could lead to that task running when it shouldn't, or failing to run when it should. This approach should only be used when all of the parameters of the config class, are significant (or all insignificant) for all of its subclasses. And wait a second... there's a bug in the above code. See it? ``TaskA`` won't behave as an ``ExternalTask`` because the parent classes are specified in the wrong order. This contrived example is easy to fix (by swapping the ordering of the parents of ``TaskA``), but real world cases can be more difficult to both spot and fix. Inheriting from multiple classes derived from ``luigi.Task`` should be undertaken with caution and avoided where possible. **Approach 3**: Use ``inherits`` and ``requires`` The ``inherits`` class decorator in this module copies parameters (and nothing else) from one task class to another, and avoids direct pythonic inheritance. .. code-block:: python import luigi from luigi.util import inherits class TaskA(luigi.ExternalTask): param_a = luigi.Parameter() def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) @inherits(TaskA) class TaskB(luigi.Task): param_b = luigi.Parameter() def requires(self): t = self.clone(TaskA) # or t = self.clone_parent() # Wait... whats this clone thingy do? # # Pass it a task class. It calls that task. And when it does, it # supplies all parameters (and only those parameters) common to # the caller and callee! # # The call to clone is equivalent to the following (note the # fact that clone avoids passing param_b). # # return TaskA(param_a=self.param_a) return t @inherits(TaskB) class TaskC(luigi.Task): param_c = luigi.Parameter() def requires(self): return self.clone(TaskB) This totally eliminates the need to repeat parameters, avoids inheritance issues, and keeps the task command line interface as simple (as it can be, anyway). Refactoring task parameters is also much easier. The ``requires`` helper function can reduce this pattern even further. It does everything ``inherits`` does, and also attaches a ``requires`` method to your task (still all without pythonic inheritance). But how does it know how to invoke the upstream task? It uses ``clone`` behind the scenes! .. code-block:: python import luigi from luigi.util import inherits, requires class TaskA(luigi.ExternalTask): param_a = luigi.Parameter() def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) @requires(TaskA) class TaskB(luigi.Task): param_b = luigi.Parameter() # The class decorator does this for me! # def requires(self): # return self.clone(TaskA) Use these helper functions effectively to avoid unnecessary repetition and dodge a few potentially nasty workflow pitfalls at the same time. Brilliant! """ import datetime import logging from luigi import six from luigi import task from luigi import parameter if six.PY3: xrange = range logger = logging.getLogger('luigi-interface') def common_params(task_instance, task_cls): """ Grab all the values in task_instance that are found in task_cls. 
""" if not isinstance(task_cls, task.Register): raise TypeError("task_cls must be an uninstantiated Task") task_instance_param_names = dict(task_instance.get_params()).keys() task_cls_params_dict = dict(task_cls.get_params()) task_cls_param_names = task_cls_params_dict.keys() common_param_names = set(task_instance_param_names).intersection(set(task_cls_param_names)) common_param_vals = [(key, task_cls_params_dict[key]) for key in common_param_names] common_kwargs = dict((key, task_instance.param_kwargs[key]) for key in common_param_names) vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs)) return vals class inherits(object): """ Task inheritance. Usage: .. code-block:: python class AnotherTask(luigi.Task): n = luigi.IntParameter() # ... @inherits(AnotherTask): class MyTask(luigi.Task): def requires(self): return self.clone_parent() def run(self): print self.n # this will be defined # ... """ def __init__(self, task_to_inherit): super(inherits, self).__init__() self.task_to_inherit = task_to_inherit def __call__(self, task_that_inherits): for param_name, param_obj in self.task_to_inherit.get_params(): if not hasattr(task_that_inherits, param_name): setattr(task_that_inherits, param_name, param_obj) # Modify task_that_inherits by subclassing it and adding methods @task._task_wraps(task_that_inherits) class Wrapped(task_that_inherits): def clone_parent(_self, **args): return _self.clone(cls=self.task_to_inherit, **args) return Wrapped class requires(object): """ Same as @inherits, but also auto-defines the requires method. """ def __init__(self, task_to_require): super(requires, self).__init__() self.inherit_decorator = inherits(task_to_require) def __call__(self, task_that_requires): task_that_requires = self.inherit_decorator(task_that_requires) # Modify task_that_requres by subclassing it and adding methods @task._task_wraps(task_that_requires) class Wrapped(task_that_requires): def requires(_self): return _self.clone_parent() return Wrapped class copies(object): """ Auto-copies a task. Usage: .. code-block:: python @copies(MyTask): class CopyOfMyTask(luigi.Task): def output(self): return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d')) """ def __init__(self, task_to_copy): super(copies, self).__init__() self.requires_decorator = requires(task_to_copy) def __call__(self, task_that_copies): task_that_copies = self.requires_decorator(task_that_copies) # Modify task_that_copies by subclassing it and adding methods @task._task_wraps(task_that_copies) class Wrapped(task_that_copies): def run(_self): i, o = _self.input(), _self.output() f = o.open('w') # TODO: assert that i, o are Target objects and not complex datastructures for line in i.open('r'): f.write(line) f.close() return Wrapped def delegates(task_that_delegates): """ Lets a task call methods on subtask(s). The way this works is that the subtask is run as a part of the task, but the task itself doesn't have to care about the requirements of the subtasks. The subtask doesn't exist from the scheduler's point of view, and its dependencies are instead required by the main task. Example: .. 
code-block:: python class PowersOfN(luigi.Task): n = luigi.IntParameter() def f(self, x): return x ** self.n @delegates class T(luigi.Task): def subtasks(self): return PowersOfN(5) def run(self): print self.subtasks().f(42) """ if not hasattr(task_that_delegates, 'subtasks'): # This method can (optionally) define a couple of delegate tasks that # will be accessible as interfaces, meaning that the task can access # those tasks and run methods defined on them, etc raise AttributeError('%s needs to implement the method "subtasks"' % task_that_delegates) @task._task_wraps(task_that_delegates) class Wrapped(task_that_delegates): def deps(self): # Overrides method in base class return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())]) def run(self): for t in task.flatten(self.subtasks()): t.run() task_that_delegates.run(self) return Wrapped def previous(task): """ Return a previous Task of the same family. By default checks if this task family only has one non-global parameter and if it is a DateParameter, DateHourParameter or DateIntervalParameter in which case it returns with the time decremented by 1 (hour, day or interval) """ params = task.get_params() previous_params = {} previous_date_params = {} for param_name, param_obj in params: param_value = getattr(task, param_name) if isinstance(param_obj, parameter.DateParameter): previous_date_params[param_name] = param_value - datetime.timedelta(days=1) elif isinstance(param_obj, parameter.DateSecondParameter): previous_date_params[param_name] = param_value - datetime.timedelta(seconds=1) elif isinstance(param_obj, parameter.DateMinuteParameter): previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1) elif isinstance(param_obj, parameter.DateHourParameter): previous_date_params[param_name] = param_value - datetime.timedelta(hours=1) elif isinstance(param_obj, parameter.DateIntervalParameter): previous_date_params[param_name] = param_value.prev() else: previous_params[param_name] = param_value previous_params.update(previous_date_params) if len(previous_date_params) == 0: raise NotImplementedError("No task parameter - can't determine previous task") elif len(previous_date_params) > 1: raise NotImplementedError("Too many date-related task parameters - can't determine previous task") else: return task.clone(**previous_params) def get_previous_completed(task, max_steps=10): prev = task for _ in xrange(max_steps): prev = previous(prev) logger.debug("Checking if %s is complete", prev) if prev.complete(): return prev return None # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ ============================================================ Using ``inherits`` and ``requires`` to ease parameter pain ============================================================ Most luigi plumbers will find themselves in an awkward task parameter situation at some point or another. Consider the following "parameter explosion" problem: .. 
code-block:: python class TaskA(luigi.ExternalTask): param_a = luigi.Parameter() def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) class TaskB(luigi.Task): param_b = luigi.Parameter() param_a = luigi.Parameter() def requires(self): return TaskA(param_a=self.param_a) class TaskC(luigi.Task): param_c = luigi.Parameter() param_b = luigi.Parameter() param_a = luigi.Parameter() def requires(self): return TaskB(param_b=self.param_b, param_a=self.param_a) In work flows requiring many tasks to be chained together in this manner, parameter handling can spiral out of control. Each downstream task becomes more burdensome than the last. Refactoring becomes more difficult. There are several ways one might try and avoid the problem. **Approach 1**: Parameters via command line or config instead of ``requires``. .. code-block:: python class TaskA(luigi.ExternalTask): param_a = luigi.Parameter() def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) class TaskB(luigi.Task): param_b = luigi.Parameter() def requires(self): return TaskA() class TaskC(luigi.Task): param_c = luigi.Parameter() def requires(self): return TaskB() Then run in the shell like so: .. code-block:: bash luigi --module my_tasks TaskC --param-c foo --TaskB-param-b bar --TaskA-param-a baz Repetitive parameters have been eliminated, but at the cost of making the job's command line interface slightly clunkier. Often this is a reasonable trade-off. But parameters can't always be refactored out every class. Downstream tasks might also need to use some of those parameters. For example, if ``TaskC`` needs to use ``param_a`` too, then ``param_a`` would still need to be repeated. **Approach 2**: Use a common parameter class .. code-block:: python class Params(luigi.Config): param_c = luigi.Parameter() param_b = luigi.Parameter() param_a = luigi.Parameter() class TaskA(Params, luigi.ExternalTask): def output(self): return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self)) class TaskB(Params): def requires(self): return TaskA() class TaskB(Params): def requires(self): return TaskB() This looks great at first glance, but a couple of issues lurk. Now ``TaskA`` and ``TaskB`` have unnecessary significant parameters. Significant parameters help define the identity of a task. Identical tasks are prevented from running at the same time by the central planner. This helps preserve the idempotent and atomic nature of luigi tasks. Unnecessary significant task parameters confuse a task's identity. Under the right circumstances, task identity confusion could lead to that task running when it shouldn't, or failing to run when it should. This approach should only be used when all of the parameters of the config class, are significant (or all insignificant) for all of its subclasses. And wait a second... there's a bug in the above code. See it? ``TaskA`` won't behave as an ``ExternalTask`` because the parent classes are specified in the wrong order. This contrived example is easy to fix (by swapping the ordering of the parents of ``TaskA``), but real world cases can be more difficult to both spot and fix. Inheriting from multiple classes derived from ``luigi.Task`` should be undertaken with caution and avoided where possible. **Approach 3**: Use ``inherits`` and ``requires`` The ``inherits`` class decorator in this module copies parameters (and nothing else) from one task class to another, and avoids direct pythonic inheritance. .. 
code-block:: python

    import luigi
    from luigi.util import inherits

    class TaskA(luigi.ExternalTask):
        param_a = luigi.Parameter()

        def output(self):
            return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self))

    @inherits(TaskA)
    class TaskB(luigi.Task):
        param_b = luigi.Parameter()

        def requires(self):
            t = self.clone(TaskA)  # or t = self.clone_parent()

            # Wait... what's this clone thingy do?
            #
            # Pass it a task class. It calls that task. And when it does, it
            # supplies all parameters (and only those parameters) common to
            # the caller and callee!
            #
            # The call to clone is equivalent to the following (note the
            # fact that clone avoids passing param_b).
            #
            # return TaskA(param_a=self.param_a)

            return t

    @inherits(TaskB)
    class TaskC(luigi.Task):
        param_c = luigi.Parameter()

        def requires(self):
            return self.clone(TaskB)


This totally eliminates the need to repeat parameters, avoids inheritance
issues, and keeps the task command-line interface as simple as it can be.
Refactoring task parameters is also much easier.

The ``requires`` helper function can reduce this pattern even further. It
does everything ``inherits`` does, and also attaches a ``requires`` method
to your task (still all without pythonic inheritance).

But how does it know how to invoke the upstream task? It uses ``clone``
behind the scenes!

.. code-block:: python

    import luigi
    from luigi.util import inherits, requires

    class TaskA(luigi.ExternalTask):
        param_a = luigi.Parameter()

        def output(self):
            return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self))

    @requires(TaskA)
    class TaskB(luigi.Task):
        param_b = luigi.Parameter()

        # The class decorator does this for me!
        # def requires(self):
        #     return self.clone(TaskA)

Use these helper functions effectively to avoid unnecessary repetition and
dodge a few potentially nasty workflow pitfalls at the same time.
Brilliant!
"""

import datetime
import logging

from luigi import six

from luigi import task
from luigi import parameter

if six.PY3:
    xrange = range

logger = logging.getLogger('luigi-interface')


def common_params(task_instance, task_cls):
    """
    Grab all the values in task_instance that are found in task_cls.
    """
    if not isinstance(task_cls, task.Register):
        raise TypeError("task_cls must be an uninstantiated Task")

    task_instance_param_names = dict(task_instance.get_params()).keys()
    task_cls_params_dict = dict(task_cls.get_params())
    task_cls_param_names = task_cls_params_dict.keys()
    common_param_names = set(task_instance_param_names).intersection(set(task_cls_param_names))
    common_param_vals = [(key, task_cls_params_dict[key]) for key in common_param_names]
    common_kwargs = dict((key, task_instance.param_kwargs[key]) for key in common_param_names)
    vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs))
    return vals


class inherits(object):
    """
    Task inheritance.

    Usage:

    .. code-block:: python

        class AnotherTask(luigi.Task):
            n = luigi.IntParameter()
            # ...

        @inherits(AnotherTask)
        class MyTask(luigi.Task):
            def requires(self):
                return self.clone_parent()

            def run(self):
                print(self.n)  # this will be defined
                # ...
    """

    def __init__(self, task_to_inherit):
        super(inherits, self).__init__()
        self.task_to_inherit = task_to_inherit

    def __call__(self, task_that_inherits):
        # Get all parameter objects from the underlying task
        for param_name, param_obj in self.task_to_inherit.get_params():
            # Check if the parameter exists in the inheriting task
            if not hasattr(task_that_inherits, param_name):
                # If not, add it to the inheriting task
                setattr(task_that_inherits, param_name, param_obj)

        # Modify task_that_inherits by adding methods
        def clone_parent(_self, **args):
            return _self.clone(cls=self.task_to_inherit, **args)
        task_that_inherits.clone_parent = clone_parent

        return task_that_inherits


class requires(object):
    """
    Same as @inherits, but also auto-defines the requires method.
    """

    def __init__(self, task_to_require):
        super(requires, self).__init__()
        self.inherit_decorator = inherits(task_to_require)

    def __call__(self, task_that_requires):
        task_that_requires = self.inherit_decorator(task_that_requires)

        # Modify task_that_requires by adding methods
        def requires(_self):
            return _self.clone_parent()
        task_that_requires.requires = requires

        return task_that_requires


class copies(object):
    """
    Auto-copies a task.

    Usage:

    .. code-block:: python

        @copies(MyTask)
        class CopyOfMyTask(luigi.Task):
            def output(self):
                return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d'))
    """

    def __init__(self, task_to_copy):
        super(copies, self).__init__()
        self.requires_decorator = requires(task_to_copy)

    def __call__(self, task_that_copies):
        task_that_copies = self.requires_decorator(task_that_copies)

        # Modify task_that_copies by subclassing it and adding methods
        @task._task_wraps(task_that_copies)
        class Wrapped(task_that_copies):

            def run(_self):
                i, o = _self.input(), _self.output()
                f = o.open('w')  # TODO: assert that i, o are Target objects and not complex datastructures
                for line in i.open('r'):
                    f.write(line)
                f.close()

        return Wrapped


def delegates(task_that_delegates):
    """
    Lets a task call methods on subtask(s).

    The way this works is that the subtask is run as a part of the task, but
    the task itself doesn't have to care about the requirements of the
    subtasks. The subtask doesn't exist from the scheduler's point of view,
    and its dependencies are instead required by the main task.

    Example:

    .. code-block:: python

        class PowersOfN(luigi.Task):
            n = luigi.IntParameter()

            def f(self, x):
                return x ** self.n

        @delegates
        class T(luigi.Task):
            def subtasks(self):
                return PowersOfN(5)

            def run(self):
                print(self.subtasks().f(42))
    """
    if not hasattr(task_that_delegates, 'subtasks'):
        # This method can (optionally) define a couple of delegate tasks that
        # will be accessible as interfaces, meaning that the task can access
        # those tasks and run methods defined on them, etc.
        raise AttributeError('%s needs to implement the method "subtasks"' % task_that_delegates)

    @task._task_wraps(task_that_delegates)
    class Wrapped(task_that_delegates):

        def deps(self):
            # Overrides method in base class
            return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())])

        def run(self):
            for t in task.flatten(self.subtasks()):
                t.run()
            task_that_delegates.run(self)

    return Wrapped


def previous(task):
    """
    Return a previous Task of the same family.

    Checks that the task has exactly one date-related parameter
    (DateParameter, DateSecondParameter, DateMinuteParameter,
    DateHourParameter or DateIntervalParameter) and returns a clone with
    that parameter decremented by one step (a day, second, minute, hour or
    interval, respectively).
    """
    params = task.get_params()
    previous_params = {}
    previous_date_params = {}

    for param_name, param_obj in params:
        param_value = getattr(task, param_name)

        if isinstance(param_obj, parameter.DateParameter):
            previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
        elif isinstance(param_obj, parameter.DateSecondParameter):
            previous_date_params[param_name] = param_value - datetime.timedelta(seconds=1)
        elif isinstance(param_obj, parameter.DateMinuteParameter):
            previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1)
        elif isinstance(param_obj, parameter.DateHourParameter):
            previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
        elif isinstance(param_obj, parameter.DateIntervalParameter):
            previous_date_params[param_name] = param_value.prev()
        else:
            previous_params[param_name] = param_value

    previous_params.update(previous_date_params)

    if len(previous_date_params) == 0:
        raise NotImplementedError("No task parameter - can't determine previous task")
    elif len(previous_date_params) > 1:
        raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
    else:
        return task.clone(**previous_params)


def get_previous_completed(task, max_steps=10):
    prev = task
    for _ in xrange(max_steps):
        prev = previous(prev)
        logger.debug("Checking if %s is complete", prev)
        if prev.complete():
            return prev
    return None
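# ---------------------------------------------------------------------------
# Illustrative sketch (an editor's addition, not part of luigi/util.py): how
# the helpers above compose. ``DailyLog`` and ``DailyReport`` are hypothetical
# task names, and the '/tmp/...' paths are placeholders; only the public API
# shown in this module is assumed.
import datetime

import luigi
from luigi.util import get_previous_completed, previous, requires


class DailyLog(luigi.ExternalTask):
    date = luigi.DateParameter()

    def output(self):
        return luigi.LocalTarget(self.date.strftime('/tmp/log-%Y-%m-%d'))


@requires(DailyLog)  # DailyReport gains ``date`` and a requires() that clones it
class DailyReport(luigi.Task):

    def output(self):
        return luigi.LocalTarget(self.date.strftime('/tmp/report-%Y-%m-%d'))

    def run(self):
        with self.output().open('w') as f:
            f.write('report for %s\n' % self.date)


if __name__ == '__main__':
    # previous() steps the single date parameter back by one day, while
    # get_previous_completed() keeps stepping back until a complete task
    # is found (or max_steps is exhausted, in which case it returns None).
    today = DailyReport(date=datetime.date(2015, 1, 2))
    assert previous(today).date == datetime.date(2015, 1, 1)
    print(get_previous_completed(today, max_steps=5))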
BugsInPy/BugsInPy/temp/projects/luigi/bug-5-fixed/luigi/luigi/util.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-5-buggy/luigi/luigi/util.py
luigi-bug-12
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The implementations of the hdfs clients: the hadoop cli client and the
snakebite client.
"""

from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import snakebite_client as hdfs_snakebite_client
from luigi.contrib.hdfs import webhdfs_client as hdfs_webhdfs_client
from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients
import luigi.contrib.target
import logging

logger = logging.getLogger('luigi-interface')


def get_autoconfig_client():
    """
    Creates the client as specified in the `luigi.cfg` configuration.
    """
    configured_client = hdfs_config.get_configured_hdfs_client()
    if configured_client == "webhdfs":
        return hdfs_webhdfs_client.WebHdfsClient()
    if configured_client == "snakebite":
        return hdfs_snakebite_client.SnakebiteHdfsClient()
    if configured_client == "snakebite_with_hadoopcli_fallback":
        return luigi.contrib.target.CascadingClient([hdfs_snakebite_client.SnakebiteHdfsClient(),
                                                     hdfs_hadoopcli_clients.create_hadoopcli_client()])
    if configured_client == "hadoopcli":
        return hdfs_hadoopcli_clients.create_hadoopcli_client()
    raise Exception("Unknown hdfs client " + configured_client)


def _with_ac(method_name):
    def result(*args, **kwargs):
        return getattr(get_autoconfig_client(), method_name)(*args, **kwargs)
    return result

exists = _with_ac('exists')
rename = _with_ac('rename')
remove = _with_ac('remove')
mkdir = _with_ac('mkdir')
listdir = _with_ac('listdir')
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The implementations of the hdfs clients: the hadoop cli client and the
snakebite client.
"""

import logging
import threading

from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import snakebite_client as hdfs_snakebite_client
from luigi.contrib.hdfs import webhdfs_client as hdfs_webhdfs_client
from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients
import luigi.contrib.target

logger = logging.getLogger('luigi-interface')

_AUTOCONFIG_CLIENT = threading.local()


def get_autoconfig_client(client_cache=_AUTOCONFIG_CLIENT):
    """
    Creates the client as specified in the `luigi.cfg` configuration.
    """
    try:
        return client_cache.client
    except AttributeError:
        configured_client = hdfs_config.get_configured_hdfs_client()
        if configured_client == "webhdfs":
            client_cache.client = hdfs_webhdfs_client.WebHdfsClient()
        elif configured_client == "snakebite":
            client_cache.client = hdfs_snakebite_client.SnakebiteHdfsClient()
        elif configured_client == "snakebite_with_hadoopcli_fallback":
            client_cache.client = luigi.contrib.target.CascadingClient([
                hdfs_snakebite_client.SnakebiteHdfsClient(),
                hdfs_hadoopcli_clients.create_hadoopcli_client(),
            ])
        elif configured_client == "hadoopcli":
            client_cache.client = hdfs_hadoopcli_clients.create_hadoopcli_client()
        else:
            raise Exception("Unknown hdfs client " + configured_client)
        return client_cache.client


def _with_ac(method_name):
    def result(*args, **kwargs):
        return getattr(get_autoconfig_client(), method_name)(*args, **kwargs)
    return result

exists = _with_ac('exists')
rename = _with_ac('rename')
remove = _with_ac('remove')
mkdir = _with_ac('mkdir')
listdir = _with_ac('listdir')
BugsInPy/BugsInPy/temp/projects/luigi/bug-12-fixed/luigi/luigi/contrib/hdfs/clients.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-12-buggy/luigi/luigi/contrib/hdfs/clients.py
luigi-bug-32
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Define the centralized register of all :class:`~luigi.task.Task` classes. """ import abc try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict from luigi import six class TaskClassException(Exception): pass class Register(abc.ABCMeta): """ The Metaclass of :py:class:`Task`. Acts as a global registry of Tasks with the following properties: 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the same object. 2. Keep track of all subclasses of :py:class:`Task` and expose them. """ __instance_cache = {} _default_namespace = None _reg = [] AMBIGUOUS_CLASS = object() # Placeholder denoting an error """If this value is returned by :py:meth:`__get_reg` then there is an ambiguous task name (two :py:class:`Task` have the same name). This denotes an error.""" def __new__(metacls, classname, bases, classdict): """ Custom class creation for namespacing. Also register all subclasses. Set the task namespace to whatever the currently declared namespace is. """ if "task_namespace" not in classdict: classdict["task_namespace"] = metacls._default_namespace cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): """ Custom class instantiation utilizing instance cache. If a Task has already been instantiated with the same parameters, the previous instance is returned to reduce number of object instances. """ def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = cls.__instance_cache if h is None: # disabled return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() # unhashable types in parameters if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(cls): """ Clear/Reset the instance cache. """ cls.__instance_cache = {} @classmethod def disable_instance_cache(cls): """ Disables the instance cache. """ cls.__instance_cache = None @property def task_family(cls): """ The task family for the given class. If ``cls.task_namespace is None`` then it's the name of the class. Otherwise, ``<task_namespace>.`` is prefixed to the class name. """ if cls.task_namespace is None: return cls.__name__ else: return "%s.%s" % (cls.task_namespace, cls.__name__) @classmethod def __get_reg(cls, include_config_without_section=False): """Return all of the registered classes. 
:return: an ``collections.OrderedDict`` of task_family -> class """ # We have to do this on-demand in case task names have changed later # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822) reg = OrderedDict() for cls in cls._reg: if cls.run == NotImplemented: continue if cls._config_without_section and not include_config_without_section: continue name = cls.task_family if name in reg and reg[name] != cls and \ reg[name] != cls.AMBIGUOUS_CLASS and \ not issubclass(cls, reg[name]): # Registering two different classes - this means we can't instantiate them by name # The only exception is if one class is a subclass of the other. In that case, we # instantiate the most-derived class (this fixes some issues with decorator wrappers). reg[name] = cls.AMBIGUOUS_CLASS else: reg[name] = cls return reg @classmethod def task_names(cls): """ List of task names as strings """ return sorted(cls.__get_reg().keys()) @classmethod def tasks_str(cls): """ Human-readable register contents dump. """ return ','.join(cls.task_names()) @classmethod def get_task_cls(cls, name): """ Returns an unambiguous class or raises an exception. """ task_cls = cls.__get_reg().get(name) if not task_cls: raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str())) if task_cls == cls.AMBIGUOUS_CLASS: raise TaskClassException('Task %r is ambiguous' % name) return task_cls @classmethod def get_all_params(cls): """ Compiles and returns all parameters for all :py:class:`Task`. :return: a ``dict`` of parameter name -> parameter. """ for task_name, task_cls in six.iteritems(cls.__get_reg(include_config_without_section=True)): if task_cls == cls.AMBIGUOUS_CLASS: continue for param_name, param_obj in task_cls.get_params(): yield task_name, task_cls._config_without_section, param_name, param_obj # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Define the centralized register of all :class:`~luigi.task.Task` classes. """ import abc try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict from luigi import six import logging logger = logging.getLogger('luigi-interface') class TaskClassException(Exception): pass class Register(abc.ABCMeta): """ The Metaclass of :py:class:`Task`. Acts as a global registry of Tasks with the following properties: 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the same object. 2. Keep track of all subclasses of :py:class:`Task` and expose them. """ __instance_cache = {} _default_namespace = None _reg = [] AMBIGUOUS_CLASS = object() # Placeholder denoting an error """If this value is returned by :py:meth:`__get_reg` then there is an ambiguous task name (two :py:class:`Task` have the same name). This denotes an error.""" def __new__(metacls, classname, bases, classdict): """ Custom class creation for namespacing. Also register all subclasses. Set the task namespace to whatever the currently declared namespace is. 
""" if "task_namespace" not in classdict: classdict["task_namespace"] = metacls._default_namespace cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): """ Custom class instantiation utilizing instance cache. If a Task has already been instantiated with the same parameters, the previous instance is returned to reduce number of object instances. """ def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = cls.__instance_cache if h is None: # disabled return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() # unhashable types in parameters if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(cls): """ Clear/Reset the instance cache. """ cls.__instance_cache = {} @classmethod def disable_instance_cache(cls): """ Disables the instance cache. """ cls.__instance_cache = None @property def task_family(cls): """ The task family for the given class. If ``cls.task_namespace is None`` then it's the name of the class. Otherwise, ``<task_namespace>.`` is prefixed to the class name. """ if cls.task_namespace is None: return cls.__name__ else: return "%s.%s" % (cls.task_namespace, cls.__name__) @classmethod def __get_reg(cls, include_config_without_section=False): """Return all of the registered classes. :return: an ``collections.OrderedDict`` of task_family -> class """ # We have to do this on-demand in case task names have changed later # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822) reg = OrderedDict() for cls in cls._reg: if cls.run == NotImplemented: continue if cls._config_without_section and not include_config_without_section: continue name = cls.task_family if name in reg and reg[name] != cls and \ reg[name] != cls.AMBIGUOUS_CLASS and \ not issubclass(cls, reg[name]): # Registering two different classes - this means we can't instantiate them by name # The only exception is if one class is a subclass of the other. In that case, we # instantiate the most-derived class (this fixes some issues with decorator wrappers). reg[name] = cls.AMBIGUOUS_CLASS else: reg[name] = cls return reg @classmethod def task_names(cls): """ List of task names as strings """ return sorted(cls.__get_reg().keys()) @classmethod def tasks_str(cls): """ Human-readable register contents dump. """ return ','.join(cls.task_names()) @classmethod def get_task_cls(cls, name): """ Returns an unambiguous class or raises an exception. """ task_cls = cls.__get_reg().get(name) if not task_cls: raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str())) if task_cls == cls.AMBIGUOUS_CLASS: raise TaskClassException('Task %r is ambiguous' % name) return task_cls @classmethod def get_all_params(cls): """ Compiles and returns all parameters for all :py:class:`Task`. :return: a ``dict`` of parameter name -> parameter. """ for task_name, task_cls in six.iteritems(cls.__get_reg(include_config_without_section=True)): if task_cls == cls.AMBIGUOUS_CLASS: continue for param_name, param_obj in task_cls.get_params(): yield task_name, task_cls._config_without_section, param_name, param_obj
BugsInPy/BugsInPy/temp/projects/luigi/bug-32-fixed/luigi/luigi/task_register.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-32-buggy/luigi/luigi/task_register.py
luigi-bug-28
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import logging import operator import os import subprocess import tempfile from luigi import six import luigi import luigi.hadoop from luigi.target import FileAlreadyExists, FileSystemTarget from luigi.task import flatten if six.PY3: unicode = str logger = logging.getLogger('luigi-interface') class HiveCommandError(RuntimeError): def __init__(self, message, out=None, err=None): super(HiveCommandError, self).__init__(message, out, err) self.message = message self.out = out self.err = err def load_hive_cmd(): return luigi.configuration.get_config().get('hive', 'command', 'hive') def get_hive_syntax(): return luigi.configuration.get_config().get('hive', 'release', 'cdh4') def run_hive(args, check_return_code=True): """ Runs the `hive` from the command line, passing in the given args, and returning stdout. With the apache release of Hive, so of the table existence checks (which are done using DESCRIBE do not exit with a return code of 0 so we need an option to ignore the return code and just return stdout for parsing """ cmd = [load_hive_cmd()] + args p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if check_return_code and p.returncode != 0: raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode), stdout, stderr) return stdout def run_hive_cmd(hivecmd, check_return_code=True): """ Runs the given hive query and returns stdout. """ return run_hive(['-e', hivecmd], check_return_code) def run_hive_script(script): """ Runs the contents of the given script in hive and returns stdout. """ if not os.path.isfile(script): raise RuntimeError("Hive script: {0} does not exist.".format(script)) return run_hive(['-f', script]) @six.add_metaclass(abc.ABCMeta) class HiveClient(object): # interface @abc.abstractmethod def table_location(self, table, database='default', partition=None): """ Returns location of db.table (or db.table.partition). partition is a dict of partition key to value. """ pass @abc.abstractmethod def table_schema(self, table, database='default'): """ Returns list of [(name, type)] for each column in database.table. """ pass @abc.abstractmethod def table_exists(self, table, database='default', partition=None): """ Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to value. """ pass @abc.abstractmethod def partition_spec(self, partition): """ Turn a dict into a string partition specification """ pass class HiveCommandClient(HiveClient): """ Uses `hive` invocations to find information. 
""" def table_location(self, table, database='default', partition=None): cmd = "use {0}; describe formatted {1}".format(database, table) if partition is not None: cmd += " PARTITION ({0})".format(self.partition_spec(partition)) stdout = run_hive_cmd(cmd) for line in stdout.split("\n"): if "Location:" in line: return line.split("\t")[1] def table_exists(self, table, database='default', partition=None): if partition is None: stdout = run_hive_cmd('use {0}; show tables like "{1}";'.format(database, table)) return stdout and table in stdout else: stdout = run_hive_cmd("""use %s; show partitions %s partition (%s)""" % (database, table, self.partition_spec(partition))) if stdout: return True else: return False def table_schema(self, table, database='default'): describe = run_hive_cmd("use {0}; describe {1}".format(database, table)) if not describe or "does not exist" in describe: return None return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")] def partition_spec(self, partition): """ Turns a dict into the a Hive partition specification string. """ return ','.join(["{0}='{1}'".format(k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0))]) class ApacheHiveCommandClient(HiveCommandClient): """ A subclass for the HiveCommandClient to (in some cases) ignore the return code from the hive command so that we can just parse the output. """ def table_schema(self, table, database='default'): describe = run_hive_cmd("use {0}; describe {1}".format(database, table), False) if not describe or "Table not found" in describe: return None return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")] class MetastoreClient(HiveClient): def table_location(self, table, database='default', partition=None): with HiveThriftContext() as client: if partition is not None: partition_str = self.partition_spec(partition) thrift_table = client.get_partition_by_name(database, table, partition_str) else: thrift_table = client.get_table(database, table) return thrift_table.sd.location def table_exists(self, table, database='default', partition=None): with HiveThriftContext() as client: if partition is None: return table in client.get_all_tables(database) else: return partition in self._existing_partitions(table, database, client) def _existing_partitions(self, table, database, client): def _parse_partition_string(partition_string): partition_def = {} for part in partition_string.split("/"): name, value = part.split("=") partition_def[name] = value return partition_def # -1 is max_parts, the # of partition names to return (-1 = unlimited) partition_strings = client.get_partition_names(database, table, -1) return [_parse_partition_string(existing_partition) for existing_partition in partition_strings] def table_schema(self, table, database='default'): with HiveThriftContext() as client: return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)] def partition_spec(self, partition): return "/".join("%s=%s" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0))) class HiveThriftContext(object): """ Context manager for hive metastore client. """ def __enter__(self): try: from thrift import Thrift from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol # Note that this will only work with a CDH release. 
# This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax. # If using the Apache release of Hive this import will fail. from hive_metastore import ThriftHiveMetastore config = luigi.configuration.get_config() host = config.get('hive', 'metastore_host') port = config.getint('hive', 'metastore_port') transport = TSocket.TSocket(host, port) transport = TTransport.TBufferedTransport(transport) protocol = TBinaryProtocol.TBinaryProtocol(transport) transport.open() self.transport = transport return ThriftHiveMetastore.Client(protocol) except ImportError as e: raise Exception('Could not import Hive thrift library:' + str(e)) def __exit__(self, exc_type, exc_val, exc_tb): self.transport.close() def get_default_client(): if get_hive_syntax() == "apache": return ApacheHiveCommandClient() else: return HiveCommandClient() client = get_default_client() class HiveQueryTask(luigi.hadoop.BaseHadoopJobTask): """ Task to run a hive query. """ # by default, we let hive figure these out. n_reduce_tasks = None bytes_per_reducer = None reducers_max = None @abc.abstractmethod def query(self): """ Text of query to run in hive """ raise RuntimeError("Must implement query!") def hiverc(self): """ Location of an rc file to run before the query if hiverc-location key is specified in client.cfg, will default to the value there otherwise returns None. Returning a list of rc files will load all of them in order. """ return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None) def hiveconfs(self): """ Returns an dict of key=value settings to be passed along to the hive command line via --hiveconf. By default, sets mapred.job.name to task_id and if not None, sets: * mapred.reduce.tasks (n_reduce_tasks) * mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool) * hive.exec.reducers.bytes.per.reducer (bytes_per_reducer) * hive.exec.reducers.max (reducers_max) """ jcs = {} jcs['mapred.job.name'] = self.task_id if self.n_reduce_tasks is not None: jcs['mapred.reduce.tasks'] = self.n_reduce_tasks if self.pool is not None: # Supporting two schedulers: fair (default) and capacity using the same option scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair') if scheduler_type == 'fair': jcs['mapred.fairscheduler.pool'] = self.pool elif scheduler_type == 'capacity': jcs['mapred.job.queue.name'] = self.pool if self.bytes_per_reducer is not None: jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer if self.reducers_max is not None: jcs['hive.exec.reducers.max'] = self.reducers_max return jcs def job_runner(self): return HiveQueryRunner() class HiveQueryRunner(luigi.hadoop.JobRunner): """ Runs a HiveQueryTask by shelling out to hive. """ def prepare_outputs(self, job): """ Called before job is started. 
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail """ outputs = flatten(job.output()) for o in outputs: if isinstance(o, FileSystemTarget): parent_dir = os.path.dirname(o.path) if parent_dir and not o.fs.exists(parent_dir): logger.info("Creating parent directory %r", parent_dir) try: # there is a possible race condition # which needs to be handled here o.fs.mkdir(parent_dir) except FileAlreadyExists: pass def run_job(self, job): self.prepare_outputs(job) with tempfile.NamedTemporaryFile() as f: query = job.query() if isinstance(query, unicode): query = query.encode('utf8') f.write(query) f.flush() arglist = [load_hive_cmd(), '-f', f.name] hiverc = job.hiverc() if hiverc: if isinstance(hiverc, str): hiverc = [hiverc] for rcfile in hiverc: arglist += ['-i', rcfile] if job.hiveconfs(): for k, v in six.iteritems(job.hiveconfs()): arglist += ['--hiveconf', '{0}={1}'.format(k, v)] logger.info(arglist) return luigi.hadoop.run_and_track_hadoop_job(arglist) class HiveTableTarget(luigi.Target): """ exists returns true if the table exists. """ def __init__(self, table, database='default', client=None): self.database = database self.table = table self.hive_cmd = load_hive_cmd() if client is None: client = get_default_client() self.client = client def exists(self): logger.debug("Checking Hive table '%s.%s' exists", self.database, self.table) return self.client.table_exists(self.table, self.database) @property def path(self): """ Returns the path to this table in HDFS. """ location = self.client.table_location(self.table, self.database) if not location: raise Exception("Couldn't find location for table: {0}".format(str(self))) return location def open(self, mode): return NotImplementedError("open() is not supported for HiveTableTarget") class HivePartitionTarget(luigi.Target): """ exists returns true if the table's partition exists. """ def __init__(self, table, partition, database='default', fail_missing_table=True, client=None): self.database = database self.table = table self.partition = partition if client is None: client = get_default_client() self.client = client self.fail_missing_table = fail_missing_table def exists(self): try: logger.debug("Checking Hive table '{d}.{t}' for partition {p}".format(d=self.database, t=self.table, p=str(self.partition))) return self.client.table_exists(self.table, self.database, self.partition) except HiveCommandError: if self.fail_missing_table: raise else: if self.client.table_exists(self.table, self.database): # a real error occurred raise else: # oh the table just doesn't exist return False @property def path(self): """ Returns the path for this HiveTablePartitionTarget's data. """ location = self.client.table_location(self.table, self.database, self.partition) if not location: raise Exception("Couldn't find location for table: {0}".format(str(self))) return location def open(self, mode): return NotImplementedError("open() is not supported for HivePartitionTarget") class ExternalHiveTask(luigi.ExternalTask): """ External task that depends on a Hive table/partition. """ database = luigi.Parameter(default='default') table = luigi.Parameter() # since this is an external task and will never be initialized from the CLI, partition can be any python object, in this case a dictionary partition = luigi.Parameter(default=None, description='Python dictionary specifying the target partition e.g. 
{"date": "2013-01-25"}') def output(self): if self.partition is not None: assert self.partition, "partition required" return HivePartitionTarget(table=self.table, partition=self.partition, database=self.database) else: return HiveTableTarget(self.table, self.database) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import logging import operator import os import subprocess import tempfile from luigi import six import luigi import luigi.hadoop from luigi.target import FileAlreadyExists, FileSystemTarget from luigi.task import flatten if six.PY3: unicode = str logger = logging.getLogger('luigi-interface') class HiveCommandError(RuntimeError): def __init__(self, message, out=None, err=None): super(HiveCommandError, self).__init__(message, out, err) self.message = message self.out = out self.err = err def load_hive_cmd(): return luigi.configuration.get_config().get('hive', 'command', 'hive') def get_hive_syntax(): return luigi.configuration.get_config().get('hive', 'release', 'cdh4') def run_hive(args, check_return_code=True): """ Runs the `hive` from the command line, passing in the given args, and returning stdout. With the apache release of Hive, so of the table existence checks (which are done using DESCRIBE do not exit with a return code of 0 so we need an option to ignore the return code and just return stdout for parsing """ cmd = [load_hive_cmd()] + args p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if check_return_code and p.returncode != 0: raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode), stdout, stderr) return stdout def run_hive_cmd(hivecmd, check_return_code=True): """ Runs the given hive query and returns stdout. """ return run_hive(['-e', hivecmd], check_return_code) def run_hive_script(script): """ Runs the contents of the given script in hive and returns stdout. """ if not os.path.isfile(script): raise RuntimeError("Hive script: {0} does not exist.".format(script)) return run_hive(['-f', script]) @six.add_metaclass(abc.ABCMeta) class HiveClient(object): # interface @abc.abstractmethod def table_location(self, table, database='default', partition=None): """ Returns location of db.table (or db.table.partition). partition is a dict of partition key to value. """ pass @abc.abstractmethod def table_schema(self, table, database='default'): """ Returns list of [(name, type)] for each column in database.table. """ pass @abc.abstractmethod def table_exists(self, table, database='default', partition=None): """ Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to value. """ pass @abc.abstractmethod def partition_spec(self, partition): """ Turn a dict into a string partition specification """ pass class HiveCommandClient(HiveClient): """ Uses `hive` invocations to find information. 
""" def table_location(self, table, database='default', partition=None): cmd = "use {0}; describe formatted {1}".format(database, table) if partition is not None: cmd += " PARTITION ({0})".format(self.partition_spec(partition)) stdout = run_hive_cmd(cmd) for line in stdout.split("\n"): if "Location:" in line: return line.split("\t")[1] def table_exists(self, table, database='default', partition=None): if partition is None: stdout = run_hive_cmd('use {0}; show tables like "{1}";'.format(database, table)) return stdout and table.lower() in stdout else: stdout = run_hive_cmd("""use %s; show partitions %s partition (%s)""" % (database, table, self.partition_spec(partition))) if stdout: return True else: return False def table_schema(self, table, database='default'): describe = run_hive_cmd("use {0}; describe {1}".format(database, table)) if not describe or "does not exist" in describe: return None return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")] def partition_spec(self, partition): """ Turns a dict into the a Hive partition specification string. """ return ','.join(["{0}='{1}'".format(k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0))]) class ApacheHiveCommandClient(HiveCommandClient): """ A subclass for the HiveCommandClient to (in some cases) ignore the return code from the hive command so that we can just parse the output. """ def table_schema(self, table, database='default'): describe = run_hive_cmd("use {0}; describe {1}".format(database, table), False) if not describe or "Table not found" in describe: return None return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")] class MetastoreClient(HiveClient): def table_location(self, table, database='default', partition=None): with HiveThriftContext() as client: if partition is not None: partition_str = self.partition_spec(partition) thrift_table = client.get_partition_by_name(database, table, partition_str) else: thrift_table = client.get_table(database, table) return thrift_table.sd.location def table_exists(self, table, database='default', partition=None): with HiveThriftContext() as client: if partition is None: return table in client.get_all_tables(database) else: return partition in self._existing_partitions(table, database, client) def _existing_partitions(self, table, database, client): def _parse_partition_string(partition_string): partition_def = {} for part in partition_string.split("/"): name, value = part.split("=") partition_def[name] = value return partition_def # -1 is max_parts, the # of partition names to return (-1 = unlimited) partition_strings = client.get_partition_names(database, table, -1) return [_parse_partition_string(existing_partition) for existing_partition in partition_strings] def table_schema(self, table, database='default'): with HiveThriftContext() as client: return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)] def partition_spec(self, partition): return "/".join("%s=%s" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0))) class HiveThriftContext(object): """ Context manager for hive metastore client. """ def __enter__(self): try: from thrift import Thrift from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol # Note that this will only work with a CDH release. 
# This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax. # If using the Apache release of Hive this import will fail. from hive_metastore import ThriftHiveMetastore config = luigi.configuration.get_config() host = config.get('hive', 'metastore_host') port = config.getint('hive', 'metastore_port') transport = TSocket.TSocket(host, port) transport = TTransport.TBufferedTransport(transport) protocol = TBinaryProtocol.TBinaryProtocol(transport) transport.open() self.transport = transport return ThriftHiveMetastore.Client(protocol) except ImportError as e: raise Exception('Could not import Hive thrift library:' + str(e)) def __exit__(self, exc_type, exc_val, exc_tb): self.transport.close() def get_default_client(): if get_hive_syntax() == "apache": return ApacheHiveCommandClient() else: return HiveCommandClient() client = get_default_client() class HiveQueryTask(luigi.hadoop.BaseHadoopJobTask): """ Task to run a hive query. """ # by default, we let hive figure these out. n_reduce_tasks = None bytes_per_reducer = None reducers_max = None @abc.abstractmethod def query(self): """ Text of query to run in hive """ raise RuntimeError("Must implement query!") def hiverc(self): """ Location of an rc file to run before the query if hiverc-location key is specified in client.cfg, will default to the value there otherwise returns None. Returning a list of rc files will load all of them in order. """ return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None) def hiveconfs(self): """ Returns an dict of key=value settings to be passed along to the hive command line via --hiveconf. By default, sets mapred.job.name to task_id and if not None, sets: * mapred.reduce.tasks (n_reduce_tasks) * mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool) * hive.exec.reducers.bytes.per.reducer (bytes_per_reducer) * hive.exec.reducers.max (reducers_max) """ jcs = {} jcs['mapred.job.name'] = self.task_id if self.n_reduce_tasks is not None: jcs['mapred.reduce.tasks'] = self.n_reduce_tasks if self.pool is not None: # Supporting two schedulers: fair (default) and capacity using the same option scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair') if scheduler_type == 'fair': jcs['mapred.fairscheduler.pool'] = self.pool elif scheduler_type == 'capacity': jcs['mapred.job.queue.name'] = self.pool if self.bytes_per_reducer is not None: jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer if self.reducers_max is not None: jcs['hive.exec.reducers.max'] = self.reducers_max return jcs def job_runner(self): return HiveQueryRunner() class HiveQueryRunner(luigi.hadoop.JobRunner): """ Runs a HiveQueryTask by shelling out to hive. """ def prepare_outputs(self, job): """ Called before job is started. 
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail """ outputs = flatten(job.output()) for o in outputs: if isinstance(o, FileSystemTarget): parent_dir = os.path.dirname(o.path) if parent_dir and not o.fs.exists(parent_dir): logger.info("Creating parent directory %r", parent_dir) try: # there is a possible race condition # which needs to be handled here o.fs.mkdir(parent_dir) except FileAlreadyExists: pass def run_job(self, job): self.prepare_outputs(job) with tempfile.NamedTemporaryFile() as f: query = job.query() if isinstance(query, unicode): query = query.encode('utf8') f.write(query) f.flush() arglist = [load_hive_cmd(), '-f', f.name] hiverc = job.hiverc() if hiverc: if isinstance(hiverc, str): hiverc = [hiverc] for rcfile in hiverc: arglist += ['-i', rcfile] if job.hiveconfs(): for k, v in six.iteritems(job.hiveconfs()): arglist += ['--hiveconf', '{0}={1}'.format(k, v)] logger.info(arglist) return luigi.hadoop.run_and_track_hadoop_job(arglist) class HiveTableTarget(luigi.Target): """ exists returns true if the table exists. """ def __init__(self, table, database='default', client=None): self.database = database self.table = table self.hive_cmd = load_hive_cmd() if client is None: client = get_default_client() self.client = client def exists(self): logger.debug("Checking Hive table '%s.%s' exists", self.database, self.table) return self.client.table_exists(self.table, self.database) @property def path(self): """ Returns the path to this table in HDFS. """ location = self.client.table_location(self.table, self.database) if not location: raise Exception("Couldn't find location for table: {0}".format(str(self))) return location def open(self, mode): return NotImplementedError("open() is not supported for HiveTableTarget") class HivePartitionTarget(luigi.Target): """ exists returns true if the table's partition exists. """ def __init__(self, table, partition, database='default', fail_missing_table=True, client=None): self.database = database self.table = table self.partition = partition if client is None: client = get_default_client() self.client = client self.fail_missing_table = fail_missing_table def exists(self): try: logger.debug("Checking Hive table '{d}.{t}' for partition {p}".format(d=self.database, t=self.table, p=str(self.partition))) return self.client.table_exists(self.table, self.database, self.partition) except HiveCommandError: if self.fail_missing_table: raise else: if self.client.table_exists(self.table, self.database): # a real error occurred raise else: # oh the table just doesn't exist return False @property def path(self): """ Returns the path for this HiveTablePartitionTarget's data. """ location = self.client.table_location(self.table, self.database, self.partition) if not location: raise Exception("Couldn't find location for table: {0}".format(str(self))) return location def open(self, mode): return NotImplementedError("open() is not supported for HivePartitionTarget") class ExternalHiveTask(luigi.ExternalTask): """ External task that depends on a Hive table/partition. """ database = luigi.Parameter(default='default') table = luigi.Parameter() # since this is an external task and will never be initialized from the CLI, partition can be any python object, in this case a dictionary partition = luigi.Parameter(default=None, description='Python dictionary specifying the target partition e.g. 
{"date": "2013-01-25"}') def output(self): if self.partition is not None: assert self.partition, "partition required" return HivePartitionTarget(table=self.table, partition=self.partition, database=self.database) else: return HiveTableTarget(self.table, self.database)
BugsInPy/BugsInPy/temp/projects/luigi/bug-28-fixed/luigi/luigi/contrib/hive.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-28-buggy/luigi/luigi/contrib/hive.py
luigi-bug-24
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime import logging import os import random import re import signal import subprocess import sys import tempfile import time import shutil import importlib import tarfile try: import cPickle as pickle except ImportError: import pickle import warnings from luigi import six import luigi import luigi.format import luigi.contrib.hdfs from luigi import configuration logger = logging.getLogger('luigi-interface') class SparkRunContext(object): def __init__(self, proc): self.proc = proc def __enter__(self): self.__old_signal = signal.getsignal(signal.SIGTERM) signal.signal(signal.SIGTERM, self.kill_job) return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is KeyboardInterrupt: self.kill_job() signal.signal(signal.SIGTERM, self.__old_signal) def kill_job(self, captured_signal=None, stack_frame=None): self.proc.kill() if captured_signal is not None: # adding 128 gives the exit code corresponding to a signal sys.exit(128 + captured_signal) class SparkJobError(RuntimeError): def __init__(self, message, out=None, err=None): super(SparkJobError, self).__init__(message, out, err) self.message = message self.out = out self.err = err def __str__(self): info = self.message if self.out: info += "\nSTDOUT: " + str(self.out) if self.err: info += "\nSTDERR: " + str(self.err) return info class SparkSubmitTask(luigi.Task): """ Template task for running a Spark job Supports running jobs on Spark local, standalone, Mesos or Yarn See http://spark.apache.org/docs/latest/submitting-applications.html for more information """ # Application (.jar or .py file) name = None entry_class = None app = None def app_options(self): """ Subclass this method to map your task parameters to the app's arguments """ return [] @property def spark_submit(self): return configuration.get_config().get('spark', 'spark-submit', 'spark-submit') @property def master(self): return configuration.get_config().get("spark", "master", None) @property def deploy_mode(self): return configuration.get_config().get("spark", "deploy-mode", None) @property def jars(self): return self._list_config(configuration.get_config().get("spark", "jars", None)) @property def py_files(self): return self._list_config(configuration.get_config().get("spark", "py-files", None)) @property def files(self): return self._list_config(configuration.get_config().get("spark", "files", None)) @property def conf(self): return self._dict_config(configuration.get_config().get("spark", "conf", None)) @property def properties_file(self): return configuration.get_config().get("spark", "properties-file", None) @property def driver_memory(self): return configuration.get_config().get("spark", "driver-memory", None) @property def driver_java_options(self): return configuration.get_config().get("spark", "driver-java-options", None) @property def driver_library_path(self): return configuration.get_config().get("spark", "driver-library-path", None) @property def 
driver_class_path(self): return configuration.get_config().get("spark", "driver-class-path", None) @property def executor_memory(self): return configuration.get_config().get("spark", "executor-memory", None) @property def driver_cores(self): return configuration.get_config().get("spark", "driver-cores", None) @property def supervise(self): return bool(configuration.get_config().get("spark", "supervise", False)) @property def total_executor_cores(self): return configuration.get_config().get("spark", "total-executor-cores", None) @property def executor_cores(self): return configuration.get_config().get("spark", "executor-cores", None) @property def queue(self): return configuration.get_config().get("spark", "queue", None) @property def num_executors(self): return configuration.get_config().get("spark", "num-executors", None) @property def archives(self): return self._list_config(configuration.get_config().get("spark", "archives", None)) @property def hadoop_conf_dir(self): return configuration.get_config().get("spark", "hadoop-conf-dir", None) def get_environment(self): env = os.environ.copy() hadoop_conf_dir = self.hadoop_conf_dir if hadoop_conf_dir: env['HADOOP_CONF_DIR'] = hadoop_conf_dir return env def spark_command(self): command = [self.spark_submit] command += self._text_arg('--master', self.master) command += self._text_arg('--deploy-mode', self.deploy_mode) command += self._text_arg('--name', self.name) command += self._text_arg('--class', self.entry_class) command += self._list_arg('--jars', self.jars) command += self._list_arg('--py-files', self.py_files) command += self._list_arg('--files', self.files) command += self._list_arg('--archives', self.archives) command += self._dict_arg('--conf', self.conf) command += self._text_arg('--properties-file', self.properties_file) command += self._text_arg('--driver-memory', self.driver_memory) command += self._text_arg('--driver-java-options', self.driver_java_options) command += self._text_arg('--driver-library-path', self.driver_library_path) command += self._text_arg('--driver-class-path', self.driver_class_path) command += self._text_arg('--executor-memory', self.executor_memory) command += self._text_arg('--driver-cores', self.driver_cores) command += self._flag_arg('--supervise', self.supervise) command += self._text_arg('--total-executor-cores', self.total_executor_cores) command += self._text_arg('--executor-cores', self.executor_cores) command += self._text_arg('--queue', self.queue) command += self._text_arg('--num-executors', self.num_executors) return command def app_command(self): if not self.app: raise NotImplementedError("subclass should define an app (.jar or .py file)") return [self.app] + self.app_options() def run(self): args = list(map(str, self.spark_command() + self.app_command())) logger.info('Running: %s', repr(args)) tmp_stdout, tmp_stderr = tempfile.TemporaryFile(), tempfile.TemporaryFile() proc = subprocess.Popen(args, stdout=tmp_stdout, stderr=tmp_stderr, env=self.get_environment(), close_fds=True, universal_newlines=True) try: with SparkRunContext(proc): proc.wait() tmp_stdout.seek(0) stdout = "".join(map(lambda s: s.decode('utf-8'), tmp_stdout.readlines())) logger.info("Spark job stdout:\n{0}".format(stdout)) if proc.returncode != 0: tmp_stderr.seek(0) stderr = "".join(map(lambda s: s.decode('utf-8'), tmp_stderr.readlines())) raise SparkJobError('Spark job failed {0}'.format(repr(args)), out=stdout, err=stderr) finally: tmp_stderr.close() tmp_stdout.close() def _list_config(self, config): if config and 
isinstance(config, six.string_types): return list(map(lambda x: x.strip(), config.split(','))) def _dict_config(self, config): if config and isinstance(config, six.string_types): return dict(map(lambda i: i.split('='), config.split('|'))) def _text_arg(self, name, value): if value: return [name, value] return [] def _list_arg(self, name, value): if value and isinstance(value, (list, tuple)): return [name, ','.join(value)] return [] def _dict_arg(self, name, value): command = [] if value and isinstance(value, dict): for prop, value in value.items(): command += [name, '"{0}={1}"'.format(prop, value)] return command def _flag_arg(self, name, value): if value: return [name] return [] class PySparkTask(SparkSubmitTask): """ Template task for running an inline PySpark job Simply implement the ``main`` method in your subclass You can optionally define package names to be distributed to the cluster with ``py_packages`` (uses luigi's global py-packages configuration by default) """ # Path to the pyspark program passed to spark-submit app = os.path.join(os.path.dirname(__file__), 'pyspark_runner.py') # Python only supports the client deploy mode, force it deploy_mode = "client" @property def name(self): return self.__class__.__name__ @property def py_packages(self): packages = configuration.get_config().get('spark', 'py-packages', None) if packages: return map(lambda s: s.strip(), packages.split(',')) def setup(self, conf): """ Called by the pyspark_runner with a SparkConf instance that will be used to instantiate the SparkContext :param conf: SparkConf """ def setup_remote(self, sc): self._setup_packages(sc) def main(self, sc, *args): """ Called by the pyspark_runner with a SparkContext and any arguments returned by ``app_options()`` :param sc: SparkContext :param args: arguments list """ raise NotImplementedError("subclass should define a main method") def app_command(self): return [self.app, self.run_pickle] + self.app_options() def run(self): self.run_path = tempfile.mkdtemp(prefix=self.name) self.run_pickle = os.path.join(self.run_path, '.'.join([self.name.replace(' ', '_'), 'pickle'])) with open(self.run_pickle, 'wb') as fd: self._dump(fd) try: super(PySparkTask, self).run() finally: shutil.rmtree(self.run_path) def _dump(self, fd): if self.__module__ == '__main__': d = pickle.dumps(self) module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0] d = d.replace(b'(c__main__', "(c" + module_name) fd.write(d) else: pickle.dump(self, fd) def _setup_packages(self, sc): """ This method compresses and uploads packages to the cluster """ packages = self.py_packages if not packages: return for package in packages: mod = importlib.import_module(package) try: mod_path = mod.__path__[0] except AttributeError: mod_path = mod.__file__ tar_path = os.path.join(self.run_path, package + '.tar.gz') tar = tarfile.open(tar_path, "w:gz") tar.add(mod_path, os.path.basename(mod_path)) tar.close() sc.addPyFile(tar_path) class SparkJob(luigi.Task): """ .. deprecated:: 1.1.1 Use ``SparkSubmitTask`` or ``PySparkTask`` instead. """ spark_workers = None spark_master_memory = None spark_worker_memory = None queue = luigi.Parameter(default=None, significant=False, positional=False) temp_hadoop_output_file = None def requires_local(self): """ Default impl - override this method if you need any local input to be accessible in init(). 
""" return [] def requires_hadoop(self): return self.requires() # default impl def input_local(self): return luigi.task.getpaths(self.requires_local()) def input(self): return luigi.task.getpaths(self.requires()) def deps(self): # Overrides the default implementation return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local()) def jar(self): raise NotImplementedError("subclass should define jar containing job_class") def job_class(self): raise NotImplementedError("subclass should define Spark job_class") def job_args(self): return [] def output(self): raise NotImplementedError("subclass should define HDFS output path") def run(self): warnings.warn("The use of SparkJob is deprecated. Please use SparkSubmitTask or PySparkTask.", stacklevel=2) original_output_path = self.output().path path_no_slash = original_output_path[:-2] if original_output_path.endswith('/*') else original_output_path path_no_slash = original_output_path[:-1] if original_output_path[-1] == '/' else path_no_slash tmp_output = luigi.contrib.hdfs.HdfsTarget(path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10)) args = ['org.apache.spark.deploy.yarn.Client'] args += ['--jar', self.jar()] args += ['--class', self.job_class()] for a in self.job_args(): if a == self.output().path: # pass temporary output path to job args logger.info('Using temp path: %s for path %s', tmp_output.path, original_output_path) args += ['--args', tmp_output.path] else: args += ['--args', str(a)] if self.spark_workers is not None: args += ['--num-workers', self.spark_workers] if self.spark_master_memory is not None: args += ['--master-memory', self.spark_master_memory] if self.spark_worker_memory is not None: args += ['--worker-memory', self.spark_worker_memory] queue = self.queue if queue is not None: args += ['--queue', queue] env = os.environ.copy() env['SPARK_JAR'] = configuration.get_config().get('spark', 'spark-jar') env['HADOOP_CONF_DIR'] = configuration.get_config().get('spark', 'hadoop-conf-dir') env['MASTER'] = 'yarn-client' spark_class = configuration.get_config().get('spark', 'spark-class') temp_stderr = tempfile.TemporaryFile() logger.info('Running: %s %s', spark_class, ' '.join(args)) proc = subprocess.Popen([spark_class] + args, stdout=subprocess.PIPE, stderr=temp_stderr, env=env, close_fds=True) return_code, final_state, app_id = self.track_progress(proc) if return_code == 0 and final_state != 'FAILED': tmp_output.move(path_no_slash) elif final_state == 'FAILED': raise SparkJobError('Spark job failed: see yarn logs for %s' % app_id) else: temp_stderr.seek(0) errors = "".join((x.decode('utf8') for x in temp_stderr.readlines())) logger.error(errors) raise SparkJobError('Spark job failed', err=errors) def track_progress(self, proc): # The Spark client currently outputs a multiline status to stdout every second # while the application is running. This instead captures status data and updates # a single line of output until the application finishes. 
app_id = None app_status = 'N/A' url = 'N/A' final_state = None start = time.time() with SparkRunContext(proc) as context: while proc.poll() is None: s = proc.stdout.readline().decode('utf8') app_id_s = re.compile('application identifier: (\w+)').search(s) if app_id_s: app_id = app_id_s.group(1) context.app_id = app_id app_status_s = re.compile('yarnAppState: (\w+)').search(s) if app_status_s: app_status = app_status_s.group(1) url_s = re.compile('appTrackingUrl: (.+)').search(s) if url_s: url = url_s.group(1) final_state_s = re.compile('distributedFinalState: (\w+)').search(s) if final_state_s: final_state = final_state_s.group(1) if not app_id: logger.info(s.strip()) else: elapsed_mins, elapsed_secs = divmod(datetime.timedelta(seconds=time.time() - start).seconds, 60) status = '[%0d:%02d] Status: %s Tracking: %s' % (elapsed_mins, elapsed_secs, app_status, url) sys.stdout.write("\r\x1b[K" + status) sys.stdout.flush() logger.info(proc.communicate()[0]) return proc.returncode, final_state, app_id class Spark1xBackwardCompat(SparkSubmitTask): """ Adapts SparkSubmitTask interface to (Py)Spark1xJob interface """ # Old interface @property def master(self): return configuration.get_config().get("spark", "master", "yarn-client") def output(self): raise NotImplementedError("subclass should define an output target") def spark_options(self): return [] def dependency_jars(self): return [] def job_args(self): return [] # New interface @property def jars(self): return self.dependency_jars() def app_options(self): return self.job_args() def spark_command(self): return super(Spark1xBackwardCompat, self).spark_command() + self.spark_options() class Spark1xJob(Spark1xBackwardCompat): """ .. deprecated:: 1.1.1 Use ``SparkSubmitTask`` or ``PySparkTask`` instead. """ # Old interface def job_class(self): raise NotImplementedError("subclass should define Spark job_class") def jar(self): raise NotImplementedError("subclass should define jar containing job_class") # New interface @property def entry_class(self): return self.job_class() @property def app(self): return self.jar() def run(self): warnings.warn("The use of Spark1xJob is deprecated. Please use SparkSubmitTask or PySparkTask.", stacklevel=2) return super(Spark1xJob, self).run() class PySpark1xJob(Spark1xBackwardCompat): """ .. deprecated:: 1.1.1 Use ``SparkSubmitTask`` or ``PySparkTask`` instead. """ # Old interface def program(self): raise NotImplementedError("subclass should define Spark .py file") # New interface @property def app(self): return self.program() def run(self): warnings.warn("The use of PySpark1xJob is deprecated. Please use SparkSubmitTask or PySparkTask.", stacklevel=2) return super(PySpark1xJob, self).run() # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import datetime import logging import os import random import re import signal import subprocess import sys import tempfile import time import shutil import importlib import tarfile try: import cPickle as pickle except ImportError: import pickle import warnings from luigi import six import luigi import luigi.format import luigi.contrib.hdfs from luigi import configuration logger = logging.getLogger('luigi-interface') class SparkRunContext(object): def __init__(self, proc): self.proc = proc def __enter__(self): self.__old_signal = signal.getsignal(signal.SIGTERM) signal.signal(signal.SIGTERM, self.kill_job) return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is KeyboardInterrupt: self.kill_job() signal.signal(signal.SIGTERM, self.__old_signal) def kill_job(self, captured_signal=None, stack_frame=None): self.proc.kill() if captured_signal is not None: # adding 128 gives the exit code corresponding to a signal sys.exit(128 + captured_signal) class SparkJobError(RuntimeError): def __init__(self, message, out=None, err=None): super(SparkJobError, self).__init__(message, out, err) self.message = message self.out = out self.err = err def __str__(self): info = self.message if self.out: info += "\nSTDOUT: " + str(self.out) if self.err: info += "\nSTDERR: " + str(self.err) return info class SparkSubmitTask(luigi.Task): """ Template task for running a Spark job Supports running jobs on Spark local, standalone, Mesos or Yarn See http://spark.apache.org/docs/latest/submitting-applications.html for more information """ # Application (.jar or .py file) name = None entry_class = None app = None def app_options(self): """ Subclass this method to map your task parameters to the app's arguments """ return [] @property def spark_submit(self): return configuration.get_config().get('spark', 'spark-submit', 'spark-submit') @property def master(self): return configuration.get_config().get("spark", "master", None) @property def deploy_mode(self): return configuration.get_config().get("spark", "deploy-mode", None) @property def jars(self): return self._list_config(configuration.get_config().get("spark", "jars", None)) @property def py_files(self): return self._list_config(configuration.get_config().get("spark", "py-files", None)) @property def files(self): return self._list_config(configuration.get_config().get("spark", "files", None)) @property def conf(self): return self._dict_config(configuration.get_config().get("spark", "conf", None)) @property def properties_file(self): return configuration.get_config().get("spark", "properties-file", None) @property def driver_memory(self): return configuration.get_config().get("spark", "driver-memory", None) @property def driver_java_options(self): return configuration.get_config().get("spark", "driver-java-options", None) @property def driver_library_path(self): return configuration.get_config().get("spark", "driver-library-path", None) @property def driver_class_path(self): return configuration.get_config().get("spark", "driver-class-path", None) @property def executor_memory(self): return configuration.get_config().get("spark", "executor-memory", None) @property def driver_cores(self): return configuration.get_config().get("spark", "driver-cores", None) @property def supervise(self): return bool(configuration.get_config().get("spark", "supervise", False)) @property def total_executor_cores(self): return configuration.get_config().get("spark", "total-executor-cores", None) @property def executor_cores(self): return 
configuration.get_config().get("spark", "executor-cores", None) @property def queue(self): return configuration.get_config().get("spark", "queue", None) @property def num_executors(self): return configuration.get_config().get("spark", "num-executors", None) @property def archives(self): return self._list_config(configuration.get_config().get("spark", "archives", None)) @property def hadoop_conf_dir(self): return configuration.get_config().get("spark", "hadoop-conf-dir", None) def get_environment(self): env = os.environ.copy() hadoop_conf_dir = self.hadoop_conf_dir if hadoop_conf_dir: env['HADOOP_CONF_DIR'] = hadoop_conf_dir return env def spark_command(self): command = [self.spark_submit] command += self._text_arg('--master', self.master) command += self._text_arg('--deploy-mode', self.deploy_mode) command += self._text_arg('--name', self.name) command += self._text_arg('--class', self.entry_class) command += self._list_arg('--jars', self.jars) command += self._list_arg('--py-files', self.py_files) command += self._list_arg('--files', self.files) command += self._list_arg('--archives', self.archives) command += self._dict_arg('--conf', self.conf) command += self._text_arg('--properties-file', self.properties_file) command += self._text_arg('--driver-memory', self.driver_memory) command += self._text_arg('--driver-java-options', self.driver_java_options) command += self._text_arg('--driver-library-path', self.driver_library_path) command += self._text_arg('--driver-class-path', self.driver_class_path) command += self._text_arg('--executor-memory', self.executor_memory) command += self._text_arg('--driver-cores', self.driver_cores) command += self._flag_arg('--supervise', self.supervise) command += self._text_arg('--total-executor-cores', self.total_executor_cores) command += self._text_arg('--executor-cores', self.executor_cores) command += self._text_arg('--queue', self.queue) command += self._text_arg('--num-executors', self.num_executors) return command def app_command(self): if not self.app: raise NotImplementedError("subclass should define an app (.jar or .py file)") return [self.app] + self.app_options() def run(self): args = list(map(str, self.spark_command() + self.app_command())) logger.info('Running: %s', repr(args)) tmp_stdout, tmp_stderr = tempfile.TemporaryFile(), tempfile.TemporaryFile() proc = subprocess.Popen(args, stdout=tmp_stdout, stderr=tmp_stderr, env=self.get_environment(), close_fds=True, universal_newlines=True) try: with SparkRunContext(proc): proc.wait() tmp_stdout.seek(0) stdout = "".join(map(lambda s: s.decode('utf-8'), tmp_stdout.readlines())) logger.info("Spark job stdout:\n{0}".format(stdout)) if proc.returncode != 0: tmp_stderr.seek(0) stderr = "".join(map(lambda s: s.decode('utf-8'), tmp_stderr.readlines())) raise SparkJobError('Spark job failed {0}'.format(repr(args)), out=stdout, err=stderr) finally: tmp_stderr.close() tmp_stdout.close() def _list_config(self, config): if config and isinstance(config, six.string_types): return list(map(lambda x: x.strip(), config.split(','))) def _dict_config(self, config): if config and isinstance(config, six.string_types): return dict(map(lambda i: i.split('='), config.split('|'))) def _text_arg(self, name, value): if value: return [name, value] return [] def _list_arg(self, name, value): if value and isinstance(value, (list, tuple)): return [name, ','.join(value)] return [] def _dict_arg(self, name, value): command = [] if value and isinstance(value, dict): for prop, value in value.items(): command += [name, 
'"{0}={1}"'.format(prop, value)] return command def _flag_arg(self, name, value): if value: return [name] return [] class PySparkTask(SparkSubmitTask): """ Template task for running an inline PySpark job Simply implement the ``main`` method in your subclass You can optionally define package names to be distributed to the cluster with ``py_packages`` (uses luigi's global py-packages configuration by default) """ # Path to the pyspark program passed to spark-submit app = os.path.join(os.path.dirname(__file__), 'pyspark_runner.py') # Python only supports the client deploy mode, force it deploy_mode = "client" @property def name(self): return self.__class__.__name__ @property def py_packages(self): packages = configuration.get_config().get('spark', 'py-packages', None) if packages: return map(lambda s: s.strip(), packages.split(',')) def setup(self, conf): """ Called by the pyspark_runner with a SparkConf instance that will be used to instantiate the SparkContext :param conf: SparkConf """ def setup_remote(self, sc): self._setup_packages(sc) def main(self, sc, *args): """ Called by the pyspark_runner with a SparkContext and any arguments returned by ``app_options()`` :param sc: SparkContext :param args: arguments list """ raise NotImplementedError("subclass should define a main method") def app_command(self): return [self.app, self.run_pickle] + self.app_options() def run(self): self.run_path = tempfile.mkdtemp(prefix=self.name) self.run_pickle = os.path.join(self.run_path, '.'.join([self.name.replace(' ', '_'), 'pickle'])) with open(self.run_pickle, 'wb') as fd: self._dump(fd) try: super(PySparkTask, self).run() finally: shutil.rmtree(self.run_path) def _dump(self, fd): if self.__module__ == '__main__': d = pickle.dumps(self) module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0] d = d.replace(b'(c__main__', "(c" + module_name) fd.write(d) else: pickle.dump(self, fd) def _setup_packages(self, sc): """ This method compresses and uploads packages to the cluster """ packages = self.py_packages if not packages: return for package in packages: mod = importlib.import_module(package) try: mod_path = mod.__path__[0] except AttributeError: mod_path = mod.__file__ tar_path = os.path.join(self.run_path, package + '.tar.gz') tar = tarfile.open(tar_path, "w:gz") tar.add(mod_path, os.path.basename(mod_path)) tar.close() sc.addPyFile(tar_path) class SparkJob(luigi.Task): """ .. deprecated:: 1.1.1 Use ``SparkSubmitTask`` or ``PySparkTask`` instead. """ spark_workers = None spark_master_memory = None spark_worker_memory = None queue = luigi.Parameter(default=None, significant=False, positional=False) temp_hadoop_output_file = None def requires_local(self): """ Default impl - override this method if you need any local input to be accessible in init(). """ return [] def requires_hadoop(self): return self.requires() # default impl def input_local(self): return luigi.task.getpaths(self.requires_local()) def input(self): return luigi.task.getpaths(self.requires()) def deps(self): # Overrides the default implementation return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local()) def jar(self): raise NotImplementedError("subclass should define jar containing job_class") def job_class(self): raise NotImplementedError("subclass should define Spark job_class") def job_args(self): return [] def output(self): raise NotImplementedError("subclass should define HDFS output path") def run(self): warnings.warn("The use of SparkJob is deprecated. 
Please use SparkSubmitTask or PySparkTask.", stacklevel=2) original_output_path = self.output().path path_no_slash = original_output_path[:-2] if original_output_path.endswith('/*') else original_output_path path_no_slash = original_output_path[:-1] if original_output_path[-1] == '/' else path_no_slash tmp_output = luigi.contrib.hdfs.HdfsTarget(path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10)) args = ['org.apache.spark.deploy.yarn.Client'] args += ['--jar', self.jar()] args += ['--class', self.job_class()] for a in self.job_args(): if a == self.output().path: # pass temporary output path to job args logger.info('Using temp path: %s for path %s', tmp_output.path, original_output_path) args += ['--args', tmp_output.path] else: args += ['--args', str(a)] if self.spark_workers is not None: args += ['--num-workers', self.spark_workers] if self.spark_master_memory is not None: args += ['--master-memory', self.spark_master_memory] if self.spark_worker_memory is not None: args += ['--worker-memory', self.spark_worker_memory] queue = self.queue if queue is not None: args += ['--queue', queue] env = os.environ.copy() env['SPARK_JAR'] = configuration.get_config().get('spark', 'spark-jar') env['HADOOP_CONF_DIR'] = configuration.get_config().get('spark', 'hadoop-conf-dir') env['MASTER'] = 'yarn-client' spark_class = configuration.get_config().get('spark', 'spark-class') temp_stderr = tempfile.TemporaryFile() logger.info('Running: %s %s', spark_class, ' '.join(args)) proc = subprocess.Popen([spark_class] + args, stdout=subprocess.PIPE, stderr=temp_stderr, env=env, close_fds=True) return_code, final_state, app_id = self.track_progress(proc) if return_code == 0 and final_state != 'FAILED': tmp_output.move(path_no_slash) elif final_state == 'FAILED': raise SparkJobError('Spark job failed: see yarn logs for %s' % app_id) else: temp_stderr.seek(0) errors = "".join((x.decode('utf8') for x in temp_stderr.readlines())) logger.error(errors) raise SparkJobError('Spark job failed', err=errors) def track_progress(self, proc): # The Spark client currently outputs a multiline status to stdout every second # while the application is running. This instead captures status data and updates # a single line of output until the application finishes. 
app_id = None app_status = 'N/A' url = 'N/A' final_state = None start = time.time() with SparkRunContext(proc) as context: while proc.poll() is None: s = proc.stdout.readline().decode('utf8') app_id_s = re.compile('application identifier: (\w+)').search(s) if app_id_s: app_id = app_id_s.group(1) context.app_id = app_id app_status_s = re.compile('yarnAppState: (\w+)').search(s) if app_status_s: app_status = app_status_s.group(1) url_s = re.compile('appTrackingUrl: (.+)').search(s) if url_s: url = url_s.group(1) final_state_s = re.compile('distributedFinalState: (\w+)').search(s) if final_state_s: final_state = final_state_s.group(1) if not app_id: logger.info(s.strip()) else: elapsed_mins, elapsed_secs = divmod(datetime.timedelta(seconds=time.time() - start).seconds, 60) status = '[%0d:%02d] Status: %s Tracking: %s' % (elapsed_mins, elapsed_secs, app_status, url) sys.stdout.write("\r\x1b[K" + status) sys.stdout.flush() logger.info(proc.communicate()[0]) return proc.returncode, final_state, app_id class Spark1xBackwardCompat(SparkSubmitTask): """ Adapts SparkSubmitTask interface to (Py)Spark1xJob interface """ # Old interface @property def master(self): return configuration.get_config().get("spark", "master", "yarn-client") def output(self): raise NotImplementedError("subclass should define an output target") def spark_options(self): return [] def dependency_jars(self): return [] def job_args(self): return [] # New interface @property def jars(self): return self.dependency_jars() def app_options(self): return self.job_args() def spark_command(self): return super(Spark1xBackwardCompat, self).spark_command() + self.spark_options() class Spark1xJob(Spark1xBackwardCompat): """ .. deprecated:: 1.1.1 Use ``SparkSubmitTask`` or ``PySparkTask`` instead. """ # Old interface def job_class(self): raise NotImplementedError("subclass should define Spark job_class") def jar(self): raise NotImplementedError("subclass should define jar containing job_class") # New interface @property def entry_class(self): return self.job_class() @property def app(self): return self.jar() def run(self): warnings.warn("The use of Spark1xJob is deprecated. Please use SparkSubmitTask or PySparkTask.", stacklevel=2) return super(Spark1xJob, self).run() class PySpark1xJob(Spark1xBackwardCompat): """ .. deprecated:: 1.1.1 Use ``SparkSubmitTask`` or ``PySparkTask`` instead. """ # Old interface def program(self): raise NotImplementedError("subclass should define Spark .py file") # New interface @property def app(self): return self.program() def run(self): warnings.warn("The use of PySpark1xJob is deprecated. Please use SparkSubmitTask or PySparkTask.", stacklevel=2) return super(PySpark1xJob, self).run()
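# A minimal usage sketch (an assumption for illustration, not part of this
# module): the task name and paths are hypothetical. PySparkTask pickles the
# task, ships it to the cluster via pyspark_runner.py, and invokes main() with
# the SparkContext plus whatever app_options() returns.
class HypotheticalWordCount(PySparkTask):
    driver_memory = '2g'
    executor_memory = '3g'

    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('hdfs:///tmp/word_counts')

    def main(self, sc, *args):
        counts = (sc.textFile('hdfs:///tmp/words.txt')
                  .flatMap(lambda line: line.split())
                  .map(lambda word: (word, 1))
                  .reduceByKey(lambda a, b: a + b))
        counts.saveAsTextFile(self.output().path)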
BugsInPy/BugsInPy/temp/projects/luigi/bug-24-fixed/luigi/luigi/contrib/spark.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-24-buggy/luigi/luigi/contrib/spark.py
luigi-bug-3
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Parameters are one of the core concepts of Luigi. All Parameters sit on :class:`~luigi.task.Task` classes. See :ref:`Parameter` for more info on how to define parameters. ''' import abc import datetime import warnings from enum import IntEnum import json from json import JSONEncoder from collections import OrderedDict, Mapping import operator import functools from ast import literal_eval try: from ConfigParser import NoOptionError, NoSectionError except ImportError: from configparser import NoOptionError, NoSectionError from luigi import date_interval from luigi import task_register from luigi import six from luigi import configuration from luigi.cmdline_parser import CmdlineParser _no_value = object() class ParameterVisibility(IntEnum): """ Possible values for the parameter visibility option. Public is the default. See :doc:`/parameters` for more info. """ PUBLIC = 0 HIDDEN = 1 PRIVATE = 2 @classmethod def has_value(cls, value): return any(value == item.value for item in cls) def serialize(self): return self.value class ParameterException(Exception): """ Base exception. """ pass class MissingParameterException(ParameterException): """ Exception signifying that there was a missing Parameter. """ pass class UnknownParameterException(ParameterException): """ Exception signifying that an unknown Parameter was supplied. """ pass class DuplicateParameterException(ParameterException): """ Exception signifying that a Parameter was specified multiple times. """ pass class Parameter(object): """ Parameter whose value is a ``str``, and a base class for other parameter types. Parameters are objects set on the Task class level to make it possible to parameterize tasks. For instance: .. code:: python class MyTask(luigi.Task): foo = luigi.Parameter() class RequiringTask(luigi.Task): def requires(self): return MyTask(foo="hello") def run(self): print(self.requires().foo) # prints "hello" This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately. When a task is instantiated, it will first use any argument as the value of the parameter, eg. if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the value will be resolved in this order of falling priority: * Any value provided on the command line: - To the root task (eg. ``--param xyz``) - Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``). * With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion` * Any default value set using the ``default`` flag. Parameter objects may be reused, but you must then set the ``positional=False`` flag. """ _counter = 0 # non-atomically increasing counter used for ordering parameters. 
def __init__(self, default=_no_value, is_global=False, significant=True, description=None, config_path=None, positional=True, always_in_help=False, batch_method=None, visibility=ParameterVisibility.PUBLIC): """ :param default: the default value for this parameter. This should match the type of the Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for ``IntParameter``. By default, no default is stored and the value must be specified at runtime. :param bool significant: specify ``False`` if the parameter should not be treated as part of the unique identifier for a Task. An insignificant Parameter might also be used to specify a password or other sensitive information that should not be made public via the scheduler. Default: ``True``. :param str description: A human-readable string describing the purpose of this Parameter. For command-line invocations, this will be used as the `help` string shown to users. Default: ``None``. :param dict config_path: a dictionary with entries ``section`` and ``name`` specifying a config file entry from which to read the default value for this parameter. DEPRECATED. Default: ``None``. :param bool positional: If true, you can set the argument as a positional argument. It's true by default but we recommend ``positional=False`` for abstract base classes and similar cases. :param bool always_in_help: For the --help option in the command line parsing. Set true to always show in --help. :param function(iterable[A])->A batch_method: Method to combine an iterable of parsed parameter values into a single value. Used when receiving batched parameter lists from the scheduler. See :ref:`batch_method` :param visibility: A Parameter whose value is a :py:class:`~luigi.parameter.ParameterVisibility`. Default value is ParameterVisibility.PUBLIC """ self._default = default self._batch_method = batch_method if is_global: warnings.warn("is_global support is removed. Assuming positional=False", DeprecationWarning, stacklevel=2) positional = False self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks self.positional = positional self.visibility = visibility if ParameterVisibility.has_value(visibility) else ParameterVisibility.PUBLIC self.description = description self.always_in_help = always_in_help if config_path is not None and ('section' not in config_path or 'name' not in config_path): raise ParameterException('config_path must be a hash containing entries for section and name') self._config_path = config_path self._counter = Parameter._counter # We need to keep track of this to get the order right (see Task class) Parameter._counter += 1 def _get_value_from_config(self, section, name): """Loads the default from the config. Returns _no_value if it doesn't exist""" conf = configuration.get_config() try: value = conf.get(section, name) except (NoSectionError, NoOptionError, KeyError): return _no_value return self.parse(value) def _get_value(self, task_name, param_name): for value, warn in self._value_iterator(task_name, param_name): if value != _no_value: if warn: warnings.warn(warn, DeprecationWarning) return value return _no_value def _value_iterator(self, task_name, param_name): """ Yield the parameter values, with optional deprecation warning as second tuple value. The parameter value will be whatever non-_no_value that is yielded first. 
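        Yield order, and hence resolution priority: the command-line value,
        the ``[task_name]`` config section (underscored name, then the
        deprecated dashed name), the deprecated ``config_path`` entry, and
        finally the default.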
""" cp_parser = CmdlineParser.get_instance() if cp_parser: dest = self._parser_global_dest(param_name, task_name) found = getattr(cp_parser.known_args, dest, None) yield (self._parse_or_no_value(found), None) yield (self._get_value_from_config(task_name, param_name), None) yield (self._get_value_from_config(task_name, param_name.replace('_', '-')), 'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format( task_name, param_name)) if self._config_path: yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']), 'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format( self._config_path['section'], self._config_path['name'], task_name, param_name)) yield (self._default, None) def has_task_value(self, task_name, param_name): return self._get_value(task_name, param_name) != _no_value def task_value(self, task_name, param_name): value = self._get_value(task_name, param_name) if value == _no_value: raise MissingParameterException("No default specified") else: return self.normalize(value) def _is_batchable(self): return self._batch_method is not None def parse(self, x): """ Parse an individual value from the input. The default implementation is the identity function, but subclasses should override this method for specialized parsing. :param str x: the value to parse. :return: the parsed value. """ return x # default impl def _parse_list(self, xs): """ Parse a list of values from the scheduler. Only possible if this is_batchable() is True. This will combine the list into a single parameter value using batch method. This should never need to be overridden. :param xs: list of values to parse and combine :return: the combined parsed values """ if not self._is_batchable(): raise NotImplementedError('No batch method found') elif not xs: raise ValueError('Empty parameter list passed to parse_list') else: return self._batch_method(map(self.parse, xs)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return str(x) def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != Parameter: return if not isinstance(param_value, six.string_types): warnings.warn('Parameter "{}" with value "{}" is not of type string.'.format(param_name, param_value)) def normalize(self, x): """ Given a parsed parameter value, normalizes it. The value can either be the result of parse(), the default value or arguments passed into the task's constructor by instantiation. This is very implementation defined, but can be used to validate/clamp valid values. For example, if you wanted to only accept even integers, and "correct" odd values to the nearest integer, you can implement normalize as ``x // 2 * 2``. """ return x # default impl def next_in_enumeration(self, _value): """ If your Parameter type has an enumerable ordering of values. You can choose to override this method. This method is used by the :py:mod:`luigi.execution_summary` module for pretty printing purposes. Enabling it to pretty print tasks like ``MyTask(num=1), MyTask(num=2), MyTask(num=3)`` to ``MyTask(num=1..3)``. :param value: The value :return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering. 
""" return None def _parse_or_no_value(self, x): if not x: return _no_value else: return self.parse(x) @staticmethod def _parser_global_dest(param_name, task_name): return task_name + '_' + param_name @classmethod def _parser_kwargs(cls, param_name, task_name=None): return { "action": "store", "dest": cls._parser_global_dest(param_name, task_name) if task_name else param_name, } class OptionalParameter(Parameter): """ A Parameter that treats empty string as None """ def serialize(self, x): if x is None: return '' else: return str(x) def parse(self, x): return x or None def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != OptionalParameter: return if not isinstance(param_value, six.string_types) and param_value is not None: warnings.warn('OptionalParameter "{}" with value "{}" is not of type string or None.'.format( param_name, param_value)) _UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0) class _DateParameterBase(Parameter): """ Base class Parameter for date (not datetime). """ def __init__(self, interval=1, start=None, **kwargs): super(_DateParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH.date() @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. """ pass def parse(self, s): """ Parses a date string formatted like ``YYYY-MM-DD``. """ return datetime.datetime.strptime(s, self.date_format).date() def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) class DateParameter(_DateParameterBase): """ Parameter whose value is a :py:class:`~datetime.date`. A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies July 10, 2013. DateParameters are 90% of the time used to be interpolated into file system paths or the like. Here is a gentle reminder of how to interpolate date parameters into strings: .. code:: python class MyTask(luigi.Task): date = luigi.DateParameter() def run(self): templated_path = "/my/path/to/my/dataset/{date:%Y/%m/%d}/" instantiated_path = templated_path.format(date=self.date) # print(instantiated_path) --> /my/path/to/my/dataset/2016/06/09/ # ... use instantiated_path ... To set this parameter to default to the current day. You can write code like this: .. code:: python import datetime class MyTask(luigi.Task): date = luigi.DateParameter(default=datetime.date.today()) """ date_format = '%Y-%m-%d' def next_in_enumeration(self, value): return value + datetime.timedelta(days=self.interval) def normalize(self, value): if value is None: return None if isinstance(value, datetime.datetime): value = value.date() delta = (value - self.start).days % self.interval return value - datetime.timedelta(days=delta) class MonthParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the month (day of :py:class:`~datetime.date` is "rounded" to first of the month). A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies July of 2013. Task objects constructed from code accept :py:class:`~datetime.date` (ignoring the day value) or :py:class:`~luigi.date_interval.Month`. """ date_format = '%Y-%m' def _add_months(self, date, months): """ Add ``months`` months to ``date``. 
Unfortunately we can't use timedeltas to add months because timedelta counts in days and there's no foolproof way to add N months in days without counting the number of days per month. """ year = date.year + (date.month + months - 1) // 12 month = (date.month + months - 1) % 12 + 1 return datetime.date(year=year, month=month, day=1) def next_in_enumeration(self, value): return self._add_months(value, self.interval) def normalize(self, value): if value is None: return None if isinstance(value, date_interval.Month): value = value.date_a months_since_start = (value.year - self.start.year) * 12 + (value.month - self.start.month) months_since_start -= months_since_start % self.interval return self._add_months(self.start, months_since_start) class YearParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the year (day and month of :py:class:`~datetime.date` is "rounded" to first day of the year). A YearParameter is a Date string formatted ``YYYY``. Task objects constructed from code accept :py:class:`~datetime.date` (ignoring the month and day values) or :py:class:`~luigi.date_interval.Year`. """ date_format = '%Y' def next_in_enumeration(self, value): return value.replace(year=value.year + self.interval) def normalize(self, value): if value is None: return None if isinstance(value, date_interval.Year): value = value.date_a delta = (value.year - self.start.year) % self.interval return datetime.date(year=value.year - delta, month=1, day=1) class _DatetimeParameterBase(Parameter): """ Base class Parameter for datetime """ def __init__(self, interval=1, start=None, **kwargs): super(_DatetimeParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. """ pass @abc.abstractproperty def _timedelta(self): """ How to move one interval of this type forward (i.e. not counting self.interval). """ pass def parse(self, s): """ Parses a string to a :py:class:`~datetime.datetime`. """ return datetime.datetime.strptime(s, self.date_format) def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) @staticmethod def _convert_to_dt(dt): if not isinstance(dt, datetime.datetime): dt = datetime.datetime.combine(dt, datetime.time.min) return dt def normalize(self, dt): """ Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at :py:attr:`~_DatetimeParameterBase.start`. """ if dt is None: return None dt = self._convert_to_dt(dt) dt = dt.replace(microsecond=0) # remove microseconds, to avoid float rounding issues. delta = (dt - self.start).total_seconds() granularity = (self._timedelta * self.interval).total_seconds() return dt - datetime.timedelta(seconds=delta % granularity) def next_in_enumeration(self, value): return value + self._timedelta * self.interval class DateHourParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour. A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at 19:00. 
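    In a task definition (an illustrative sketch; ``MyTask`` is hypothetical):

    .. code-block:: python

        class MyTask(luigi.Task):
            hour = luigi.DateHourParameter()

    which can then be set on the command line as ``--hour 2013-07-10T19``.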
""" date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T' _timedelta = datetime.timedelta(hours=1) class DateMinuteParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute. A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at 19:07. The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute. """ date_format = '%Y-%m-%dT%H%M' _timedelta = datetime.timedelta(minutes=1) deprecated_date_format = '%Y-%m-%dT%HH%M' def parse(self, s): try: value = datetime.datetime.strptime(s, self.deprecated_date_format) warnings.warn( 'Using "H" between hours and minutes is deprecated, omit it instead.', DeprecationWarning, stacklevel=2 ) return value except ValueError: return super(DateMinuteParameter, self).parse(s) class DateSecondParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the second. A DateSecondParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the second. For example, ``2013-07-10T190738`` specifies July 10, 2013 at 19:07:38. The interval parameter can be used to clamp this parameter to every N seconds, instead of every second. """ date_format = '%Y-%m-%dT%H%M%S' _timedelta = datetime.timedelta(seconds=1) class IntParameter(Parameter): """ Parameter whose value is an ``int``. """ def parse(self, s): """ Parses an ``int`` from the string using ``int()``. """ return int(s) def next_in_enumeration(self, value): return value + 1 class FloatParameter(Parameter): """ Parameter whose value is a ``float``. """ def parse(self, s): """ Parses a ``float`` from the string using ``float()``. """ return float(s) class BoolParameter(Parameter): """ A Parameter whose value is a ``bool``. This parameter has an implicit default value of ``False``. For the command line interface this means that the value is ``False`` unless you add ``"--the-bool-parameter"`` to your command without giving a parameter value. This is considered *implicit* parsing (the default). However, in some situations one might want to give the explicit bool value (``"--the-bool-parameter true|false"``), e.g. when you configure the default value to be ``True``. This is called *explicit* parsing. When omitting the parameter value, it is still considered ``True`` but to avoid ambiguities during argument parsing, make sure to always place bool parameters behind the task family on the command line when using explicit parsing. You can toggle between the two parsing modes on a per-parameter base via .. code-block:: python class MyTask(luigi.Task): implicit_bool = luigi.BoolParameter(parsing=luigi.BoolParameter.IMPLICIT_PARSING) explicit_bool = luigi.BoolParameter(parsing=luigi.BoolParameter.EXPLICIT_PARSING) or globally by .. code-block:: python luigi.BoolParameter.parsing = luigi.BoolParameter.EXPLICIT_PARSING for all bool parameters instantiated after this line. """ IMPLICIT_PARSING = "implicit" EXPLICIT_PARSING = "explicit" parsing = IMPLICIT_PARSING def __init__(self, *args, **kwargs): self.parsing = kwargs.pop("parsing", self.__class__.parsing) super(BoolParameter, self).__init__(*args, **kwargs) if self._default == _no_value: self._default = False def parse(self, val): """ Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. 
""" s = str(val).lower() if s == "true": return True elif s == "false": return False else: raise ValueError("cannot interpret '{}' as boolean".format(val)) def normalize(self, value): try: return self.parse(value) except ValueError: return None def _parser_kwargs(self, *args, **kwargs): parser_kwargs = super(BoolParameter, self)._parser_kwargs(*args, **kwargs) if self.parsing == self.IMPLICIT_PARSING: parser_kwargs["action"] = "store_true" elif self.parsing == self.EXPLICIT_PARSING: parser_kwargs["nargs"] = "?" parser_kwargs["const"] = True else: raise ValueError("unknown parsing value '{}'".format(self.parsing)) return parser_kwargs class DateIntervalParameter(Parameter): """ A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`. Date Intervals are specified using the ISO 8601 date notation for dates (eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks (eg. "2015-W35"). In addition, it also supports arbitrary date intervals provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04"). """ def parse(self, s): """ Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i raise ValueError('Invalid date interval - could not be parsed') class TimeDeltaParameter(Parameter): """ Class that maps to timedelta using strings in any of the following forms: * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h") Note: multiple arguments must be supplied in longest to shortest unit order * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported) * ISO 8601 duration ``PnW`` See https://en.wikipedia.org/wiki/ISO_8601#Durations """ def _apply_regex(self, regex, input): import re re_match = re.match(regex, input) if re_match and any(re_match.groups()): kwargs = {} has_val = False for k, v in six.iteritems(re_match.groupdict(default="0")): val = int(v) if val > -1: has_val = True kwargs[k] = val if has_val: return datetime.timedelta(**kwargs) def _parseIso8601(self, input): def field(key): return r"(?P<%s>\d+)%s" % (key, key[0].upper()) def optional_field(key): return "(%s)?" % field(key) # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta) regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]])) return self._apply_regex(regex, input) def _parseSimple(self, input): keys = ["weeks", "days", "hours", "minutes", "seconds"] # Give the digits a regex group name from the keys, then look for text with the first letter of the key, # optionally followed by the rest of the word, with final char (the "s") optional regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys]) return self._apply_regex(regex, input) def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. 
""" result = self._parseIso8601(input) if not result: result = self._parseSimple(input) if result is not None: return result else: raise ParameterException("Invalid time delta - could not parse %s" % input) def serialize(self, x): """ Converts datetime.timedelta to a string :param x: the value to serialize. """ weeks = x.days // 7 days = x.days % 7 hours = x.seconds // 3600 minutes = (x.seconds % 3600) // 60 seconds = (x.seconds % 3600) % 60 result = "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds) return result def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != TimeDeltaParameter: return if not isinstance(param_value, datetime.timedelta): warnings.warn('Parameter "{}" with value "{}" is not of type timedelta.'.format(param_name, param_value)) class TaskParameter(Parameter): """ A parameter that takes another luigi task class. When used programatically, the parameter should be specified directly with the :py:class:`luigi.task.Task` (sub) class. Like ``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line, you specify the :py:meth:`luigi.task.Task.get_task_family`. Like .. code-block:: console $ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module. When the :py:class:`luigi.task.Task` class is instantiated to an object. The value will always be a task class (and not a string). """ def parse(self, input): """ Parse a task_famly using the :class:`~luigi.task_register.Register` """ return task_register.Register.get_task_cls(input) def serialize(self, cls): """ Converts the :py:class:`luigi.task.Task` (sub) class to its family name. """ return cls.get_task_family() class EnumParameter(Parameter): """ A parameter whose value is an :class:`~enum.Enum`. In the task definition, use .. code-block:: python class Model(enum.Enum): Honda = 1 Volvo = 2 class MyTask(luigi.Task): my_param = luigi.EnumParameter(enum=Model) At the command line, use, .. code-block:: console $ luigi --module my_tasks MyTask --my-param Honda """ def __init__(self, *args, **kwargs): if 'enum' not in kwargs: raise ParameterException('An enum class must be specified.') self._enum = kwargs.pop('enum') super(EnumParameter, self).__init__(*args, **kwargs) def parse(self, s): try: return self._enum[s] except KeyError: raise ValueError('Invalid enum value - could not be parsed') def serialize(self, e): return e.name class _FrozenOrderedDict(Mapping): """ It is an immutable wrapper around ordered dictionaries that implements the complete :py:class:`collections.Mapping` interface. It can be used as a drop-in replacement for dictionaries where immutability and ordering are desired. """ def __init__(self, *args, **kwargs): self.__dict = OrderedDict(*args, **kwargs) self.__hash = None def __getitem__(self, key): return self.__dict[key] def __iter__(self): return iter(self.__dict) def __len__(self): return len(self.__dict) def __repr__(self): return '<FrozenOrderedDict %s>' % repr(self.__dict) def __hash__(self): if self.__hash is None: hashes = map(hash, self.items()) self.__hash = functools.reduce(operator.xor, hashes, 0) return self.__hash def get_wrapped(self): return self.__dict def _recursively_freeze(value): """ Recursively walks ``Mapping``s and ``list``s and converts them to ``_FrozenOrderedDict`` and ``tuples``, respectively. 
""" if isinstance(value, Mapping): return _FrozenOrderedDict(((k, _recursively_freeze(v)) for k, v in value.items())) elif isinstance(value, list) or isinstance(value, tuple): return tuple(_recursively_freeze(v) for v in value) return value class _DictParamEncoder(JSONEncoder): """ JSON encoder for :py:class:`~DictParameter`, which makes :py:class:`~_FrozenOrderedDict` JSON serializable. """ def default(self, obj): if isinstance(obj, _FrozenOrderedDict): return obj.get_wrapped() return json.JSONEncoder.default(self, obj) class DictParameter(Parameter): """ Parameter whose value is a ``dict``. In the task definition, use .. code-block:: python class MyTask(luigi.Task): tags = luigi.DictParameter() def run(self): logging.info("Find server with role: %s", self.tags['role']) server = aws.ec2.find_my_resource(self.tags) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --tags <JSON string> Simple example with two tags: .. code-block:: console $ luigi --module my_tasks MyTask --tags '{"role": "web", "env": "staging"}' It can be used to define dynamic parameters, when you do not know the exact list of your parameters (e.g. list of tags, that are dynamically constructed outside Luigi), or you have a complex parameter containing logically related values (like a database connection config). """ def normalize(self, value): """ Ensure that dictionary parameter is converted to a _FrozenOrderedDict so it can be hashed. """ return _recursively_freeze(value) def parse(self, s): """ Parses an immutable and ordered ``dict`` from a JSON string using standard JSON library. We need to use an immutable dictionary, to create a hashable parameter and also preserve the internal structure of parsing. The traversal order of standard ``dict`` is undefined, which can result various string representations of this parameter, and therefore a different task id for the task containing this parameter. This is because task id contains the hash of parameters' JSON representation. :param s: String to be parse """ return json.loads(s, object_pairs_hook=_FrozenOrderedDict) def serialize(self, x): return json.dumps(x, cls=_DictParamEncoder) class ListParameter(Parameter): """ Parameter whose value is a ``list``. In the task definition, use .. code-block:: python class MyTask(luigi.Task): grades = luigi.ListParameter() def run(self): sum = 0 for element in self.grades: sum += element avg = sum / len(self.grades) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --grades <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --grades '[100,70]' """ def normalize(self, x): """ Ensure that struct is recursively converted to a tuple so it can be hashed. :param str x: the value to parse. :return: the normalized (hashable/immutable) value. """ return _recursively_freeze(x) def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """ return list(json.loads(x, object_pairs_hook=_FrozenOrderedDict)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return json.dumps(x, cls=_DictParamEncoder) class TupleParameter(ListParameter): """ Parameter whose value is a ``tuple`` or ``tuple`` of tuples. In the task definition, use .. 
code-block:: python class MyTask(luigi.Task): book_locations = luigi.TupleParameter() def run(self): for location in self.book_locations: print("Go to page %d, line %d" % (location[0], location[1])) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --book_locations <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --book_locations '((12,3),(4,15),(52,1))' """ def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """ # Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case. # A tuple string may come from a config file or from cli execution. # t = ((1, 2), (3, 4)) # t_str = '((1,2),(3,4))' # t_json_str = json.dumps(t) # t_json_str == '[[1, 2], [3, 4]]' # json.loads(t_json_str) == t # json.loads(t_str) == ValueError: No JSON object could be decoded # Therefore, if json.loads(x) returns a ValueError, try ast.literal_eval(x). # ast.literal_eval(t_str) == t try: # loop required to parse tuple of tuples return tuple(tuple(x) for x in json.loads(x, object_pairs_hook=_FrozenOrderedDict)) except ValueError: return literal_eval(x) # if this causes an error, let that error be raised. class NumericalParameter(Parameter): """ Parameter whose value is a number of the specified type, e.g. ``int`` or ``float`` and in the range specified. In the task definition, use .. code-block:: python class MyTask(luigi.Task): my_param_1 = luigi.NumericalParameter( var_type=int, min_value=-3, max_value=7) # -3 <= my_param_1 < 7 my_param_2 = luigi.NumericalParameter( var_type=int, min_value=-3, max_value=7, left_op=operator.lt, right_op=operator.le) # -3 < my_param_2 <= 7 At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --my-param-1 -3 --my-param-2 -2 """ def __init__(self, left_op=operator.le, right_op=operator.lt, *args, **kwargs): """ :param function var_type: The type of the input variable, e.g. int or float. :param min_value: The minimum value permissible in the accepted values range. May be inclusive or exclusive based on left_op parameter. This should be the same type as var_type. :param max_value: The maximum value permissible in the accepted values range. May be inclusive or exclusive based on right_op parameter. This should be the same type as var_type. :param function left_op: The comparison operator for the left-most comparison in the expression ``min_value left_op value right_op value``. This operator should generally be either ``operator.lt`` or ``operator.le``. Default: ``operator.le``. :param function right_op: The comparison operator for the right-most comparison in the expression ``min_value left_op value right_op value``. This operator should generally be either ``operator.lt`` or ``operator.le``. Default: ``operator.lt``. 
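        For example, with ``var_type=int``, ``min_value=-3``, ``max_value=7``
        and the default operators, the permitted range is ``[-3, 7)``:
        ``parse('-3')`` returns ``-3``, while ``parse('7')`` raises
        ``ValueError``.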
""" if "var_type" not in kwargs: raise ParameterException("var_type must be specified") self._var_type = kwargs.pop("var_type") if "min_value" not in kwargs: raise ParameterException("min_value must be specified") self._min_value = kwargs.pop("min_value") if "max_value" not in kwargs: raise ParameterException("max_value must be specified") self._max_value = kwargs.pop("max_value") self._left_op = left_op self._right_op = right_op self._permitted_range = ( "{var_type} in {left_endpoint}{min_value}, {max_value}{right_endpoint}".format( var_type=self._var_type.__name__, min_value=self._min_value, max_value=self._max_value, left_endpoint="[" if left_op == operator.le else "(", right_endpoint=")" if right_op == operator.lt else "]")) super(NumericalParameter, self).__init__(*args, **kwargs) if self.description: self.description += " " else: self.description = "" self.description += "permitted values: " + self._permitted_range def parse(self, s): value = self._var_type(s) if (self._left_op(self._min_value, value) and self._right_op(value, self._max_value)): return value else: raise ValueError( "{s} is not in the set of {permitted_range}".format( s=s, permitted_range=self._permitted_range)) class ChoiceParameter(Parameter): """ A parameter which takes two values: 1. an instance of :class:`~collections.Iterable` and 2. the class of the variables to convert to. In the task definition, use .. code-block:: python class MyTask(luigi.Task): my_param = luigi.ChoiceParameter(choices=[0.1, 0.2, 0.3], var_type=float) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --my-param 0.1 Consider using :class:`~luigi.EnumParameter` for a typed, structured alternative. This class can perform the same role when all choices are the same type and transparency of parameter value on the command line is desired. """ def __init__(self, var_type=str, *args, **kwargs): """ :param function var_type: The type of the input variable, e.g. str, int, float, etc. Default: str :param choices: An iterable, all of whose elements are of `var_type` to restrict parameter choices to. """ if "choices" not in kwargs: raise ParameterException("A choices iterable must be specified") self._choices = set(kwargs.pop("choices")) self._var_type = var_type assert all(type(choice) is self._var_type for choice in self._choices), "Invalid type in choices" super(ChoiceParameter, self).__init__(*args, **kwargs) if self.description: self.description += " " else: self.description = "" self.description += ( "Choices: {" + ", ".join(str(choice) for choice in self._choices) + "}") def parse(self, s): var = self._var_type(s) return self.normalize(var) def normalize(self, var): if var in self._choices: return var else: raise ValueError("{var} is not a valid choice from {choices}".format( var=var, choices=self._choices)) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Parameters are one of the core concepts of Luigi. All Parameters sit on :class:`~luigi.task.Task` classes. 
See :ref:`Parameter` for more info on how to define parameters. ''' import abc import datetime import warnings from enum import IntEnum import json from json import JSONEncoder from collections import OrderedDict, Mapping import operator import functools from ast import literal_eval try: from ConfigParser import NoOptionError, NoSectionError except ImportError: from configparser import NoOptionError, NoSectionError from luigi import date_interval from luigi import task_register from luigi import six from luigi import configuration from luigi.cmdline_parser import CmdlineParser _no_value = object() class ParameterVisibility(IntEnum): """ Possible values for the parameter visibility option. Public is the default. See :doc:`/parameters` for more info. """ PUBLIC = 0 HIDDEN = 1 PRIVATE = 2 @classmethod def has_value(cls, value): return any(value == item.value for item in cls) def serialize(self): return self.value class ParameterException(Exception): """ Base exception. """ pass class MissingParameterException(ParameterException): """ Exception signifying that there was a missing Parameter. """ pass class UnknownParameterException(ParameterException): """ Exception signifying that an unknown Parameter was supplied. """ pass class DuplicateParameterException(ParameterException): """ Exception signifying that a Parameter was specified multiple times. """ pass class Parameter(object): """ Parameter whose value is a ``str``, and a base class for other parameter types. Parameters are objects set on the Task class level to make it possible to parameterize tasks. For instance: .. code:: python class MyTask(luigi.Task): foo = luigi.Parameter() class RequiringTask(luigi.Task): def requires(self): return MyTask(foo="hello") def run(self): print(self.requires().foo) # prints "hello" This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately. When a task is instantiated, it will first use any argument as the value of the parameter, eg. if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the value will be resolved in this order of falling priority: * Any value provided on the command line: - To the root task (eg. ``--param xyz``) - Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``). * With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion` * Any default value set using the ``default`` flag. Parameter objects may be reused, but you must then set the ``positional=False`` flag. """ _counter = 0 # non-atomically increasing counter used for ordering parameters. def __init__(self, default=_no_value, is_global=False, significant=True, description=None, config_path=None, positional=True, always_in_help=False, batch_method=None, visibility=ParameterVisibility.PUBLIC): """ :param default: the default value for this parameter. This should match the type of the Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for ``IntParameter``. By default, no default is stored and the value must be specified at runtime. :param bool significant: specify ``False`` if the parameter should not be treated as part of the unique identifier for a Task. An insignificant Parameter might also be used to specify a password or other sensitive information that should not be made public via the scheduler. Default: ``True``. :param str description: A human-readable string describing the purpose of this Parameter. 
For command-line invocations, this will be used as the `help` string shown to users. Default: ``None``. :param dict config_path: a dictionary with entries ``section`` and ``name`` specifying a config file entry from which to read the default value for this parameter. DEPRECATED. Default: ``None``. :param bool positional: If true, you can set the argument as a positional argument. It's true by default but we recommend ``positional=False`` for abstract base classes and similar cases. :param bool always_in_help: For the --help option in the command line parsing. Set true to always show in --help. :param function(iterable[A])->A batch_method: Method to combine an iterable of parsed parameter values into a single value. Used when receiving batched parameter lists from the scheduler. See :ref:`batch_method` :param visibility: A Parameter whose value is a :py:class:`~luigi.parameter.ParameterVisibility`. Default value is ParameterVisibility.PUBLIC """ self._default = default self._batch_method = batch_method if is_global: warnings.warn("is_global support is removed. Assuming positional=False", DeprecationWarning, stacklevel=2) positional = False self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks self.positional = positional self.visibility = visibility if ParameterVisibility.has_value(visibility) else ParameterVisibility.PUBLIC self.description = description self.always_in_help = always_in_help if config_path is not None and ('section' not in config_path or 'name' not in config_path): raise ParameterException('config_path must be a hash containing entries for section and name') self._config_path = config_path self._counter = Parameter._counter # We need to keep track of this to get the order right (see Task class) Parameter._counter += 1 def _get_value_from_config(self, section, name): """Loads the default from the config. Returns _no_value if it doesn't exist""" conf = configuration.get_config() try: value = conf.get(section, name) except (NoSectionError, NoOptionError, KeyError): return _no_value return self.parse(value) def _get_value(self, task_name, param_name): for value, warn in self._value_iterator(task_name, param_name): if value != _no_value: if warn: warnings.warn(warn, DeprecationWarning) return value return _no_value def _value_iterator(self, task_name, param_name): """ Yield the parameter values, with optional deprecation warning as second tuple value. The parameter value will be whatever non-_no_value that is yielded first. """ cp_parser = CmdlineParser.get_instance() if cp_parser: dest = self._parser_global_dest(param_name, task_name) found = getattr(cp_parser.known_args, dest, None) yield (self._parse_or_no_value(found), None) yield (self._get_value_from_config(task_name, param_name), None) yield (self._get_value_from_config(task_name, param_name.replace('_', '-')), 'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format( task_name, param_name)) if self._config_path: yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']), 'The use of the configuration [{}] {} is deprecated. 
Please use [{}] {}'.format( self._config_path['section'], self._config_path['name'], task_name, param_name)) yield (self._default, None) def has_task_value(self, task_name, param_name): return self._get_value(task_name, param_name) != _no_value def task_value(self, task_name, param_name): value = self._get_value(task_name, param_name) if value == _no_value: raise MissingParameterException("No default specified") else: return self.normalize(value) def _is_batchable(self): return self._batch_method is not None def parse(self, x): """ Parse an individual value from the input. The default implementation is the identity function, but subclasses should override this method for specialized parsing. :param str x: the value to parse. :return: the parsed value. """ return x # default impl def _parse_list(self, xs): """ Parse a list of values from the scheduler. Only possible if ``_is_batchable()`` is ``True``. This will combine the list into a single parameter value using the batch method. This should never need to be overridden. :param xs: list of values to parse and combine :return: the combined parsed values """ if not self._is_batchable(): raise NotImplementedError('No batch method found') elif not xs: raise ValueError('Empty parameter list passed to parse_list') else: return self._batch_method(map(self.parse, xs)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return str(x) def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != Parameter: return if not isinstance(param_value, six.string_types): warnings.warn('Parameter "{}" with value "{}" is not of type string.'.format(param_name, param_value)) def normalize(self, x): """ Given a parsed parameter value, normalizes it. The value can either be the result of parse(), the default value or arguments passed into the task's constructor by instantiation. This is very implementation-defined, but can be used to validate/clamp valid values. For example, if you wanted to only accept even integers, and "correct" odd values down to the nearest even integer, you could implement normalize as ``x // 2 * 2``. """ return x # default impl def next_in_enumeration(self, _value): """ If your Parameter type has an enumerable ordering of values, you can choose to override this method. This method is used by the :py:mod:`luigi.execution_summary` module for pretty-printing purposes, enabling it to render tasks like ``MyTask(num=1), MyTask(num=2), MyTask(num=3)`` as ``MyTask(num=1..3)``. :param value: The value :return: The next value, like ``value + 1``, or ``None`` if there's no enumerable ordering.
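An illustrative sketch, not part of luigi itself: a hypothetical subclass whose values step by two could override it like this:

.. code-block:: python

    class EvenIntParameter(IntParameter):
        # step the enumeration in increments of two
        def next_in_enumeration(self, value):
            return value + 2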
""" return None def _parse_or_no_value(self, x): if not x: return _no_value else: return self.parse(x) @staticmethod def _parser_global_dest(param_name, task_name): return task_name + '_' + param_name @classmethod def _parser_kwargs(cls, param_name, task_name=None): return { "action": "store", "dest": cls._parser_global_dest(param_name, task_name) if task_name else param_name, } class OptionalParameter(Parameter): """ A Parameter that treats empty string as None """ def serialize(self, x): if x is None: return '' else: return str(x) def parse(self, x): return x or None def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != OptionalParameter: return if not isinstance(param_value, six.string_types) and param_value is not None: warnings.warn('OptionalParameter "{}" with value "{}" is not of type string or None.'.format( param_name, param_value)) _UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0) class _DateParameterBase(Parameter): """ Base class Parameter for date (not datetime). """ def __init__(self, interval=1, start=None, **kwargs): super(_DateParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH.date() @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. """ pass def parse(self, s): """ Parses a date string formatted like ``YYYY-MM-DD``. """ return datetime.datetime.strptime(s, self.date_format).date() def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) class DateParameter(_DateParameterBase): """ Parameter whose value is a :py:class:`~datetime.date`. A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies July 10, 2013. DateParameters are 90% of the time used to be interpolated into file system paths or the like. Here is a gentle reminder of how to interpolate date parameters into strings: .. code:: python class MyTask(luigi.Task): date = luigi.DateParameter() def run(self): templated_path = "/my/path/to/my/dataset/{date:%Y/%m/%d}/" instantiated_path = templated_path.format(date=self.date) # print(instantiated_path) --> /my/path/to/my/dataset/2016/06/09/ # ... use instantiated_path ... To set this parameter to default to the current day. You can write code like this: .. code:: python import datetime class MyTask(luigi.Task): date = luigi.DateParameter(default=datetime.date.today()) """ date_format = '%Y-%m-%d' def next_in_enumeration(self, value): return value + datetime.timedelta(days=self.interval) def normalize(self, value): if value is None: return None if isinstance(value, datetime.datetime): value = value.date() delta = (value - self.start).days % self.interval return value - datetime.timedelta(days=delta) class MonthParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the month (day of :py:class:`~datetime.date` is "rounded" to first of the month). A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies July of 2013. Task objects constructed from code accept :py:class:`~datetime.date` (ignoring the day value) or :py:class:`~luigi.date_interval.Month`. """ date_format = '%Y-%m' def _add_months(self, date, months): """ Add ``months`` months to ``date``. 
Unfortunately we can't use timedeltas to add months because timedelta counts in days and there's no foolproof way to add N months in days without counting the number of days per month. """ year = date.year + (date.month + months - 1) // 12 month = (date.month + months - 1) % 12 + 1 return datetime.date(year=year, month=month, day=1) def next_in_enumeration(self, value): return self._add_months(value, self.interval) def normalize(self, value): if value is None: return None if isinstance(value, date_interval.Month): value = value.date_a months_since_start = (value.year - self.start.year) * 12 + (value.month - self.start.month) months_since_start -= months_since_start % self.interval return self._add_months(self.start, months_since_start) class YearParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the year (day and month of :py:class:`~datetime.date` is "rounded" to first day of the year). A YearParameter is a Date string formatted ``YYYY``. Task objects constructed from code accept :py:class:`~datetime.date` (ignoring the month and day values) or :py:class:`~luigi.date_interval.Year`. """ date_format = '%Y' def next_in_enumeration(self, value): return value.replace(year=value.year + self.interval) def normalize(self, value): if value is None: return None if isinstance(value, date_interval.Year): value = value.date_a delta = (value.year - self.start.year) % self.interval return datetime.date(year=value.year - delta, month=1, day=1) class _DatetimeParameterBase(Parameter): """ Base class Parameter for datetime """ def __init__(self, interval=1, start=None, **kwargs): super(_DatetimeParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. """ pass @abc.abstractproperty def _timedelta(self): """ How to move one interval of this type forward (i.e. not counting self.interval). """ pass def parse(self, s): """ Parses a string to a :py:class:`~datetime.datetime`. """ return datetime.datetime.strptime(s, self.date_format) def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) @staticmethod def _convert_to_dt(dt): if not isinstance(dt, datetime.datetime): dt = datetime.datetime.combine(dt, datetime.time.min) return dt def normalize(self, dt): """ Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at :py:attr:`~_DatetimeParameterBase.start`. """ if dt is None: return None dt = self._convert_to_dt(dt) dt = dt.replace(microsecond=0) # remove microseconds, to avoid float rounding issues. delta = (dt - self.start).total_seconds() granularity = (self._timedelta * self.interval).total_seconds() return dt - datetime.timedelta(seconds=delta % granularity) def next_in_enumeration(self, value): return value + self._timedelta * self.interval class DateHourParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour. A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at 19:00. 
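A hedged usage sketch (the values are illustrative only):

.. code-block:: python

    import datetime

    p = DateHourParameter()
    p.parse('2013-07-10T19')
    # -> datetime.datetime(2013, 7, 10, 19, 0)
    p.serialize(datetime.datetime(2013, 7, 10, 19))
    # -> '2013-07-10T19'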
""" date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T' _timedelta = datetime.timedelta(hours=1) class DateMinuteParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute. A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at 19:07. The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute. """ date_format = '%Y-%m-%dT%H%M' _timedelta = datetime.timedelta(minutes=1) deprecated_date_format = '%Y-%m-%dT%HH%M' def parse(self, s): try: value = datetime.datetime.strptime(s, self.deprecated_date_format) warnings.warn( 'Using "H" between hours and minutes is deprecated, omit it instead.', DeprecationWarning, stacklevel=2 ) return value except ValueError: return super(DateMinuteParameter, self).parse(s) class DateSecondParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the second. A DateSecondParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the second. For example, ``2013-07-10T190738`` specifies July 10, 2013 at 19:07:38. The interval parameter can be used to clamp this parameter to every N seconds, instead of every second. """ date_format = '%Y-%m-%dT%H%M%S' _timedelta = datetime.timedelta(seconds=1) class IntParameter(Parameter): """ Parameter whose value is an ``int``. """ def parse(self, s): """ Parses an ``int`` from the string using ``int()``. """ return int(s) def next_in_enumeration(self, value): return value + 1 class FloatParameter(Parameter): """ Parameter whose value is a ``float``. """ def parse(self, s): """ Parses a ``float`` from the string using ``float()``. """ return float(s) class BoolParameter(Parameter): """ A Parameter whose value is a ``bool``. This parameter has an implicit default value of ``False``. For the command line interface this means that the value is ``False`` unless you add ``"--the-bool-parameter"`` to your command without giving a parameter value. This is considered *implicit* parsing (the default). However, in some situations one might want to give the explicit bool value (``"--the-bool-parameter true|false"``), e.g. when you configure the default value to be ``True``. This is called *explicit* parsing. When omitting the parameter value, it is still considered ``True`` but to avoid ambiguities during argument parsing, make sure to always place bool parameters behind the task family on the command line when using explicit parsing. You can toggle between the two parsing modes on a per-parameter base via .. code-block:: python class MyTask(luigi.Task): implicit_bool = luigi.BoolParameter(parsing=luigi.BoolParameter.IMPLICIT_PARSING) explicit_bool = luigi.BoolParameter(parsing=luigi.BoolParameter.EXPLICIT_PARSING) or globally by .. code-block:: python luigi.BoolParameter.parsing = luigi.BoolParameter.EXPLICIT_PARSING for all bool parameters instantiated after this line. """ IMPLICIT_PARSING = "implicit" EXPLICIT_PARSING = "explicit" parsing = IMPLICIT_PARSING def __init__(self, *args, **kwargs): self.parsing = kwargs.pop("parsing", self.__class__.parsing) super(BoolParameter, self).__init__(*args, **kwargs) if self._default == _no_value: self._default = False def parse(self, val): """ Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. 
""" s = str(val).lower() if s == "true": return True elif s == "false": return False else: raise ValueError("cannot interpret '{}' as boolean".format(val)) def normalize(self, value): try: return self.parse(value) except ValueError: return None def _parser_kwargs(self, *args, **kwargs): parser_kwargs = super(BoolParameter, self)._parser_kwargs(*args, **kwargs) if self.parsing == self.IMPLICIT_PARSING: parser_kwargs["action"] = "store_true" elif self.parsing == self.EXPLICIT_PARSING: parser_kwargs["nargs"] = "?" parser_kwargs["const"] = True else: raise ValueError("unknown parsing value '{}'".format(self.parsing)) return parser_kwargs class DateIntervalParameter(Parameter): """ A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`. Date Intervals are specified using the ISO 8601 date notation for dates (eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks (eg. "2015-W35"). In addition, it also supports arbitrary date intervals provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04"). """ def parse(self, s): """ Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i raise ValueError('Invalid date interval - could not be parsed') class TimeDeltaParameter(Parameter): """ Class that maps to timedelta using strings in any of the following forms: * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h") Note: multiple arguments must be supplied in longest to shortest unit order * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported) * ISO 8601 duration ``PnW`` See https://en.wikipedia.org/wiki/ISO_8601#Durations """ def _apply_regex(self, regex, input): import re re_match = re.match(regex, input) if re_match and any(re_match.groups()): kwargs = {} has_val = False for k, v in six.iteritems(re_match.groupdict(default="0")): val = int(v) if val > -1: has_val = True kwargs[k] = val if has_val: return datetime.timedelta(**kwargs) def _parseIso8601(self, input): def field(key): return r"(?P<%s>\d+)%s" % (key, key[0].upper()) def optional_field(key): return "(%s)?" % field(key) # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta) regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]])) return self._apply_regex(regex, input) def _parseSimple(self, input): keys = ["weeks", "days", "hours", "minutes", "seconds"] # Give the digits a regex group name from the keys, then look for text with the first letter of the key, # optionally followed by the rest of the word, with final char (the "s") optional regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys]) return self._apply_regex(regex, input) def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. 
""" result = self._parseIso8601(input) if not result: result = self._parseSimple(input) if result is not None: return result else: raise ParameterException("Invalid time delta - could not parse %s" % input) def serialize(self, x): """ Converts datetime.timedelta to a string :param x: the value to serialize. """ weeks = x.days // 7 days = x.days % 7 hours = x.seconds // 3600 minutes = (x.seconds % 3600) // 60 seconds = (x.seconds % 3600) % 60 result = "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds) return result def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != TimeDeltaParameter: return if not isinstance(param_value, datetime.timedelta): warnings.warn('Parameter "{}" with value "{}" is not of type timedelta.'.format(param_name, param_value)) class TaskParameter(Parameter): """ A parameter that takes another luigi task class. When used programatically, the parameter should be specified directly with the :py:class:`luigi.task.Task` (sub) class. Like ``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line, you specify the :py:meth:`luigi.task.Task.get_task_family`. Like .. code-block:: console $ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module. When the :py:class:`luigi.task.Task` class is instantiated to an object. The value will always be a task class (and not a string). """ def parse(self, input): """ Parse a task_famly using the :class:`~luigi.task_register.Register` """ return task_register.Register.get_task_cls(input) def serialize(self, cls): """ Converts the :py:class:`luigi.task.Task` (sub) class to its family name. """ return cls.get_task_family() class EnumParameter(Parameter): """ A parameter whose value is an :class:`~enum.Enum`. In the task definition, use .. code-block:: python class Model(enum.Enum): Honda = 1 Volvo = 2 class MyTask(luigi.Task): my_param = luigi.EnumParameter(enum=Model) At the command line, use, .. code-block:: console $ luigi --module my_tasks MyTask --my-param Honda """ def __init__(self, *args, **kwargs): if 'enum' not in kwargs: raise ParameterException('An enum class must be specified.') self._enum = kwargs.pop('enum') super(EnumParameter, self).__init__(*args, **kwargs) def parse(self, s): try: return self._enum[s] except KeyError: raise ValueError('Invalid enum value - could not be parsed') def serialize(self, e): return e.name class _FrozenOrderedDict(Mapping): """ It is an immutable wrapper around ordered dictionaries that implements the complete :py:class:`collections.Mapping` interface. It can be used as a drop-in replacement for dictionaries where immutability and ordering are desired. """ def __init__(self, *args, **kwargs): self.__dict = OrderedDict(*args, **kwargs) self.__hash = None def __getitem__(self, key): return self.__dict[key] def __iter__(self): return iter(self.__dict) def __len__(self): return len(self.__dict) def __repr__(self): return '<FrozenOrderedDict %s>' % repr(self.__dict) def __hash__(self): if self.__hash is None: hashes = map(hash, self.items()) self.__hash = functools.reduce(operator.xor, hashes, 0) return self.__hash def get_wrapped(self): return self.__dict def _recursively_freeze(value): """ Recursively walks ``Mapping``s and ``list``s and converts them to ``_FrozenOrderedDict`` and ``tuples``, respectively. 
""" if isinstance(value, Mapping): return _FrozenOrderedDict(((k, _recursively_freeze(v)) for k, v in value.items())) elif isinstance(value, list) or isinstance(value, tuple): return tuple(_recursively_freeze(v) for v in value) return value class _DictParamEncoder(JSONEncoder): """ JSON encoder for :py:class:`~DictParameter`, which makes :py:class:`~_FrozenOrderedDict` JSON serializable. """ def default(self, obj): if isinstance(obj, _FrozenOrderedDict): return obj.get_wrapped() return json.JSONEncoder.default(self, obj) class DictParameter(Parameter): """ Parameter whose value is a ``dict``. In the task definition, use .. code-block:: python class MyTask(luigi.Task): tags = luigi.DictParameter() def run(self): logging.info("Find server with role: %s", self.tags['role']) server = aws.ec2.find_my_resource(self.tags) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --tags <JSON string> Simple example with two tags: .. code-block:: console $ luigi --module my_tasks MyTask --tags '{"role": "web", "env": "staging"}' It can be used to define dynamic parameters, when you do not know the exact list of your parameters (e.g. list of tags, that are dynamically constructed outside Luigi), or you have a complex parameter containing logically related values (like a database connection config). """ def normalize(self, value): """ Ensure that dictionary parameter is converted to a _FrozenOrderedDict so it can be hashed. """ return _recursively_freeze(value) def parse(self, s): """ Parses an immutable and ordered ``dict`` from a JSON string using standard JSON library. We need to use an immutable dictionary, to create a hashable parameter and also preserve the internal structure of parsing. The traversal order of standard ``dict`` is undefined, which can result various string representations of this parameter, and therefore a different task id for the task containing this parameter. This is because task id contains the hash of parameters' JSON representation. :param s: String to be parse """ return json.loads(s, object_pairs_hook=_FrozenOrderedDict) def serialize(self, x): return json.dumps(x, cls=_DictParamEncoder) class ListParameter(Parameter): """ Parameter whose value is a ``list``. In the task definition, use .. code-block:: python class MyTask(luigi.Task): grades = luigi.ListParameter() def run(self): sum = 0 for element in self.grades: sum += element avg = sum / len(self.grades) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --grades <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --grades '[100,70]' """ def normalize(self, x): """ Ensure that struct is recursively converted to a tuple so it can be hashed. :param str x: the value to parse. :return: the normalized (hashable/immutable) value. """ return _recursively_freeze(x) def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """ return list(json.loads(x, object_pairs_hook=_FrozenOrderedDict)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return json.dumps(x, cls=_DictParamEncoder) class TupleParameter(ListParameter): """ Parameter whose value is a ``tuple`` or ``tuple`` of tuples. In the task definition, use .. 
code-block:: python class MyTask(luigi.Task): book_locations = luigi.TupleParameter() def run(self): for location in self.book_locations: print("Go to page %d, line %d" % (location[0], location[1])) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --book_locations <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --book_locations '((12,3),(4,15),(52,1))' """ def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """ # Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case. # A tuple string may come from a config file or from cli execution. # t = ((1, 2), (3, 4)) # t_str = '((1,2),(3,4))' # t_json_str = json.dumps(t) # t_json_str == '[[1, 2], [3, 4]]' # json.loads(t_json_str) == t # json.loads(t_str) == ValueError: No JSON object could be decoded # Therefore, if json.loads(x) returns a ValueError, try ast.literal_eval(x). # ast.literal_eval(t_str) == t try: # loop required to parse tuple of tuples return tuple(tuple(x) for x in json.loads(x, object_pairs_hook=_FrozenOrderedDict)) except (ValueError, TypeError): return tuple(literal_eval(x)) # if this causes an error, let that error be raised. class NumericalParameter(Parameter): """ Parameter whose value is a number of the specified type, e.g. ``int`` or ``float`` and in the range specified. In the task definition, use .. code-block:: python class MyTask(luigi.Task): my_param_1 = luigi.NumericalParameter( var_type=int, min_value=-3, max_value=7) # -3 <= my_param_1 < 7 my_param_2 = luigi.NumericalParameter( var_type=int, min_value=-3, max_value=7, left_op=operator.lt, right_op=operator.le) # -3 < my_param_2 <= 7 At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --my-param-1 -3 --my-param-2 -2 """ def __init__(self, left_op=operator.le, right_op=operator.lt, *args, **kwargs): """ :param function var_type: The type of the input variable, e.g. int or float. :param min_value: The minimum value permissible in the accepted values range. May be inclusive or exclusive based on left_op parameter. This should be the same type as var_type. :param max_value: The maximum value permissible in the accepted values range. May be inclusive or exclusive based on right_op parameter. This should be the same type as var_type. :param function left_op: The comparison operator for the left-most comparison in the expression ``min_value left_op value right_op value``. This operator should generally be either ``operator.lt`` or ``operator.le``. Default: ``operator.le``. :param function right_op: The comparison operator for the right-most comparison in the expression ``min_value left_op value right_op value``. This operator should generally be either ``operator.lt`` or ``operator.le``. Default: ``operator.lt``. 
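A hedged example of the default left-closed, right-open interval:

.. code-block:: python

    p = NumericalParameter(var_type=int, min_value=0, max_value=10)
    p.parse('0')   # -> 0 (left endpoint included by operator.le)
    p.parse('10')  # raises ValueError (right endpoint excluded by operator.lt)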
""" if "var_type" not in kwargs: raise ParameterException("var_type must be specified") self._var_type = kwargs.pop("var_type") if "min_value" not in kwargs: raise ParameterException("min_value must be specified") self._min_value = kwargs.pop("min_value") if "max_value" not in kwargs: raise ParameterException("max_value must be specified") self._max_value = kwargs.pop("max_value") self._left_op = left_op self._right_op = right_op self._permitted_range = ( "{var_type} in {left_endpoint}{min_value}, {max_value}{right_endpoint}".format( var_type=self._var_type.__name__, min_value=self._min_value, max_value=self._max_value, left_endpoint="[" if left_op == operator.le else "(", right_endpoint=")" if right_op == operator.lt else "]")) super(NumericalParameter, self).__init__(*args, **kwargs) if self.description: self.description += " " else: self.description = "" self.description += "permitted values: " + self._permitted_range def parse(self, s): value = self._var_type(s) if (self._left_op(self._min_value, value) and self._right_op(value, self._max_value)): return value else: raise ValueError( "{s} is not in the set of {permitted_range}".format( s=s, permitted_range=self._permitted_range)) class ChoiceParameter(Parameter): """ A parameter which takes two values: 1. an instance of :class:`~collections.Iterable` and 2. the class of the variables to convert to. In the task definition, use .. code-block:: python class MyTask(luigi.Task): my_param = luigi.ChoiceParameter(choices=[0.1, 0.2, 0.3], var_type=float) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --my-param 0.1 Consider using :class:`~luigi.EnumParameter` for a typed, structured alternative. This class can perform the same role when all choices are the same type and transparency of parameter value on the command line is desired. """ def __init__(self, var_type=str, *args, **kwargs): """ :param function var_type: The type of the input variable, e.g. str, int, float, etc. Default: str :param choices: An iterable, all of whose elements are of `var_type` to restrict parameter choices to. """ if "choices" not in kwargs: raise ParameterException("A choices iterable must be specified") self._choices = set(kwargs.pop("choices")) self._var_type = var_type assert all(type(choice) is self._var_type for choice in self._choices), "Invalid type in choices" super(ChoiceParameter, self).__init__(*args, **kwargs) if self.description: self.description += " " else: self.description = "" self.description += ( "Choices: {" + ", ".join(str(choice) for choice in self._choices) + "}") def parse(self, s): var = self._var_type(s) return self.normalize(var) def normalize(self, var): if var in self._choices: return var else: raise ValueError("{var} is not a valid choice from {choices}".format( var=var, choices=self._choices))
luigi-bug-15
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import functools import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """ add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatibility. We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) class Failures(object): """ This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items()) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> might remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() > task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. 
That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers.difference_update(workers) def disable_workers(self, workers): self._remove_workers_from_tasks(workers, remove_stakeholders=False) for worker in workers: self.get_worker(worker).disabled = True def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
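# Pre-bind the scheduler-wide disable settings below so that every Task
# object this scheduler creates shares the same failure/disable configuration.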
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task) and task.id not in necessary_tasks: logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return not getattr(worker, 'disabled', False) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] worker_enabled = self.update(worker_id) if worker_enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker_enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if worker_enabled and not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable and status != FAILED and worker_enabled: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def disable_worker(self, worker): self._state.disable_workers({worker}) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(status=RUNNING): if task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: upstream_status = self._upstream_status(task.id, upstream_table) if upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, 
upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements status = max((upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps), key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def resource_list(self): """ Resources usage info and their consumers (tasks). 
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. 
""" import collections try: import cPickle as pickle except ImportError: import pickle import functools import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. 
""" self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items()) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. 
""" if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> might remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() > task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. 
That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers.difference_update(workers) def disable_workers(self, workers): self._remove_workers_from_tasks(workers, remove_stakeholders=False) for worker in workers: self.get_worker(worker).disabled = True def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED, UNKNOWN) or \ task.scheduler_disable_time is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
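        # (illustrative, values hypothetical: getintdict('resources') reads a config section
        # such as  [resources] gpu=2 ssh=10  into {'gpu': 2, 'ssh': 10}, i.e. the total
        # units of each resource that may be held by RUNNING tasks at any one time)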
        self._make_task = functools.partial(
            Task,
            disable_failures=self._config.disable_failures,
            disable_hard_timeout=self._config.disable_hard_timeout,
            disable_window=self._config.disable_window)
        self._worker_requests = {}

    def load(self):
        self._state.load()

    def dump(self):
        self._state.dump()

    def prune(self):
        logger.info("Starting pruning of task graph")

        self._prune_workers()
        self._prune_tasks()

        logger.info("Done pruning task graph")

    def _prune_workers(self):
        remove_workers = []
        for worker in self._state.get_active_workers():
            if worker.prune(self._config):
                logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
                remove_workers.append(worker.id)

        self._state.inactivate_workers(remove_workers)

    def _prune_tasks(self):
        assistant_ids = set(w.id for w in self._state.get_assistants())
        remove_tasks = []

        if assistant_ids:
            necessary_tasks = self._state.get_necessary_tasks()
        else:
            necessary_tasks = ()

        for task in self._state.get_active_tasks():
            self._state.fail_dead_worker_task(task, self._config, assistant_ids)
            self._state.update_status(task, self._config)
            if self._state.may_prune(task) and task.id not in necessary_tasks:
                logger.info("Removing task %r", task.id)
                remove_tasks.append(task.id)

        self._state.inactivate_tasks(remove_tasks)

    def update(self, worker_id, worker_reference=None, get_work=False):
        """
        Keep track of whenever the worker was last active.
        """
        worker = self._state.get_worker(worker_id)
        worker.update(worker_reference, get_work=get_work)
        return not getattr(worker, 'disabled', False)

    def _update_priority(self, task, prio, worker):
        """
        Update priority of the given task.

        Priority can only be increased.
        If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
        """
        task.priority = prio = max(prio, task.priority)
        for dep in task.deps or []:
            t = self._state.get_task(dep)
            if t is not None and prio > t.priority:
                self._update_priority(t, prio, worker)

    def add_task(self, task_id=None, status=PENDING, runnable=True,
                 deps=None, new_deps=None, expl=None, resources=None,
                 priority=0, family='', module=None, params=None,
                 assistant=False, tracking_url=None, **kwargs):
        """
        * add task identified by task_id if it doesn't exist
        * if deps is not None, update dependency list
        * update status of task
        * add additional workers/stakeholders
        * update priority when needed
        """
        worker_id = kwargs['worker']
        worker_enabled = self.update(worker_id)

        if worker_enabled:
            _default_task = self._make_task(
                task_id=task_id, status=PENDING, deps=deps, resources=resources,
                priority=priority, family=family, module=module, params=params,
            )
        else:
            _default_task = None

        task = self._state.get_task(task_id, setdefault=_default_task)

        if task is None or (task.status != RUNNING and not worker_enabled):
            return

        # for setting priority, we'll sometimes create tasks with unset family and params
        if not task.family:
            task.family = family
        if not getattr(task, 'module', None):
            task.module = module
        if not task.params:
            task.params = _get_default(params, {})

        if tracking_url is not None or task.status != RUNNING:
            task.tracking_url = tracking_url

        if task.remove is not None:
            task.remove = None  # unmark task for removal so it isn't removed after being added

        if expl is not None:
            task.expl = expl

        if not (task.status == RUNNING and status == PENDING) or new_deps:
            # don't allow re-scheduling of task while it is running, it must either fail or succeed first
            if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
            if status == FAILED:
                task.retry = self._retry_time(task, self._config)

        if deps is not None:
            task.deps = set(deps)

        if new_deps is not None:
            task.deps.update(new_deps)

        if resources is not None:
            task.resources = resources

        if worker_enabled and not assistant:
            task.stakeholders.add(worker_id)

            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)

        self._update_priority(task, priority, worker_id)

        if runnable and status != FAILED and worker_enabled:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable

    def add_worker(self, worker, info, **kwargs):
        self._state.get_worker(worker).add_info(info)

    def disable_worker(self, worker):
        self._state.disable_workers({worker})

    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)

    def _has_resources(self, needed_resources, used_resources):
        if needed_resources is None:
            return True

        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] > available_resources.get(resource, 1):
                return False
        return True

    def _used_resources(self):
        used_resources = collections.defaultdict(int)
        if self._resources is not None:
            for task in self._state.get_active_tasks(status=RUNNING):
                if task.resources:
                    for resource, amount in six.iteritems(task.resources):
                        used_resources[resource] += amount
        return used_resources

    def _rank(self, task):
        """
        Return worker's rank function for task scheduling.

        :return:
        """
        return task.priority, -task.time

    def _schedulable(self, task):
        if task.status != PENDING:
            return False
        for dep in task.deps:
            dep_task = self._state.get_task(dep, default=None)
            if dep_task is None or dep_task.status != DONE:
                return False
        return True

    def _retry_time(self, task, config):
        return time.time() + config.retry_delay

    def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
        # TODO: remove any expired nodes

        # Algo: iterate over all nodes, find the highest priority node with no dependencies and available
        # resources.

        # Resource checking looks both at currently available resources and at which resources would
        # be available if all running tasks died and we rescheduled all workers greedily. We do both
        # checks in order to prevent a worker with many low-priority tasks from starving other
        # workers with higher priority tasks that share the same resources.
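        # A concrete sketch of the two checks above (resource names hypothetical): with
        # resources = {'gpu': 1} and one RUNNING task holding {'gpu': 1}, a pending gpu
        # task fails the used_resources check, so it is not handed out now; but it still
        # books its gpu into greedy_resources below, so lower-priority gpu tasks later in
        # the sorted list are not handed out either and cannot starve it.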
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: upstream_status = self._upstream_status(task.id, upstream_table) if upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, 
upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements status = max((upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps), key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def resource_list(self): """ Resources usage info and their consumers (tasks). 
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
BugsInPy/BugsInPy/temp/projects/luigi/bug-15-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-15-buggy/luigi/luigi/scheduler.py
luigi-bug-31
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""

import collections
try:
    import cPickle as pickle
except ImportError:
    import pickle
import datetime
import functools
import itertools
import logging
import os
import time

from luigi import six

from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config

logger = logging.getLogger("luigi.server")


class Scheduler(object):
    """
    Abstract base class.

    Note that the methods all take string arguments, not Task objects...
    """
    add_task = NotImplemented
    get_work = NotImplemented
    ping = NotImplemented

UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'

UPSTREAM_SEVERITY_ORDER = (
    '',
    UPSTREAM_RUNNING,
    UPSTREAM_MISSING_INPUT,
    UPSTREAM_FAILED,
    UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
    FAILED: UPSTREAM_FAILED,
    RUNNING: UPSTREAM_RUNNING,
    PENDING: UPSTREAM_MISSING_INPUT,
    DISABLED: UPSTREAM_DISABLED,
}


class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
    # at some point (in particular this would force users to replace all dashes with underscores in the config)
    retry_delay = parameter.FloatParameter(default=900.0,
                                           config_path=dict(section='scheduler', name='retry-delay'))
    remove_delay = parameter.FloatParameter(default=600.0,
                                            config_path=dict(section='scheduler', name='remove-delay'))
    worker_disconnect_delay = parameter.FloatParameter(default=60.0,
                                                       config_path=dict(section='scheduler', name='worker-disconnect-delay'))
    state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle',
                                     config_path=dict(section='scheduler', name='state-path'))

    # Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
    # These disables last for disable_persist seconds.
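    # (for example, with hypothetical values disable_failures=5, disable_window=3600 and
    # disable_persist=86400, a task failing 5 times within an hour is set to DISABLED and
    # is only re-enabled automatically after a day)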
disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000, config_path=dict(section='scheduler', name='max-shown-task')) record_task_history = parameter.BoolParameter(default=False, config_path=dict(section='scheduler', name='record_task_history')) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() def add_failure(self): """ Add a failure event with the current timestamp. """ self.failures.append(time.time()) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.failures = Failures(disable_window) self.scheduler_disable_time = None def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): return self.failures.num_failures() >= self.disable_failures def can_disable(self): return self.disable_failures is not None class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host) self.last_active = last_active # seconds since epoch self.started = time.time() # seconds since epoch self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference): if worker_reference: self.reference = worker_reference self.last_active = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def dump(self): state = (self._tasks, self._active_workers) try: with open(self._state_path, 'wb') as fobj: pickle.dump(state, fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self._tasks, self._active_workers = state self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker class, this code needs to be updated for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) else: logger.info("No prior state file exists at %s. 
Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None # not sure why we have SUSPENDED, as it can never be set if new_status == SUSPENDED: new_status = PENDING if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None: return if new_status == FAILED and task.can_disable(): task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def prune(self, task, config, assistants): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, 
delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_window=self._config.disable_window) def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): if self._state.prune(task, self._config, assistant_ids): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. 
""" task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, worker, task_id, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ self.update(worker) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if not (task.status == RUNNING and status == PENDING): # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. # We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task_id, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = time.time() + self._config.retry_delay if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources # only assistants should normally schedule tasks as FAILED and not runnable if runnable or status != FAILED: task.stakeholders.add(worker) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker) self._update_priority(task, priority, worker) if runnable: task.workers.add(worker) if expl is not None: task.expl = expl def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self): """ Return worker's rank function for task scheduling. 
:return: """ dependents = collections.defaultdict(int) def not_done(t): task = self._state.get_task(t, default=None) return task is None or task.status != DONE for task in self._state.get_pending_tasks(): if task.status != DONE: deps = list(filter(not_done, task.deps)) inverse_num_deps = 1.0 / max(len(deps), 1) for dep in deps: dependents[dep] += inverse_num_deps return lambda task: (task.priority, dependents[task.id], -task.time) def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def get_work(self, worker, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest-priority node with no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. # TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for # Return remaining tasks that have no FAILED descendants self.update(worker, {'host': host}) if assistant: self.add_worker(worker, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] used_resources = self._used_resources() greedy_resources = collections.defaultdict(int) n_unique_pending = 0 greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in self._state.get_active_workers()) tasks = list(self._state.get_pending_tasks()) tasks.sort(key=self._rank(), reverse=True) for task in tasks: in_workers = assistant or worker in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker best_task.time_running = time.time() self._update_task_history(best_task.id, RUNNING, host=host) reply['task_id'] = best_task.id
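# Illustrative note on the reply contract (names taken from the code above, values made up): # a successful reply here looks like # {'n_pending_tasks': 3, 'running_tasks': [...], 'task_id': 'MyTask(date=2015-01-01)', # 'n_unique_pending': 1} # plus the 'task_family' / 'task_module' / 'task_params' keys filled in just below; # when no runnable task is found, 'task_id' stays None and the worker is expected to ask again later.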
reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, worker, **kwargs): self.update(worker) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'deps': list(task.deps), 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status for task in self._state.get_active_tasks(status): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task_id, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task_id, successful) elif status == PENDING: self._task_history.task_scheduled(task_id) elif status == RUNNING: self._task_history.task_started(task_id, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """ add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatibility.
We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0, config_path=dict(section='scheduler', name='retry-delay')) remove_delay = parameter.FloatParameter(default=600.0, config_path=dict(section='scheduler', name='remove-delay')) worker_disconnect_delay = parameter.FloatParameter(default=60.0, config_path=dict(section='scheduler', name='worker-disconnect-delay')) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle', config_path=dict(section='scheduler', name='state-path')) # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000, config_path=dict(section='scheduler', name='max-shown-task')) record_task_history = parameter.BoolParameter(default=False, config_path=dict(section='scheduler', name='record_task_history')) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() def add_failure(self): """ Add a failure event with the current timestamp. """ self.failures.append(time.time()) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.failures = Failures(disable_window) self.scheduler_disable_time = None def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): return self.failures.num_failures() >= self.disable_failures def can_disable(self): return self.disable_failures is not None class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active # seconds since epoch self.started = time.time() # seconds since epoch self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference): if worker_reference: self.reference = worker_reference self.last_active = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistence. The point of this class is to enable other ways to keep state, e.g. by using a database. These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def dump(self): state = (self._tasks, self._active_workers) try: with open(self._state_path, 'wb') as fobj: pickle.dump(state, fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state.
Starting from clean slate.") return self._tasks, self._active_workers = state self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker class, this code needs to be updated for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None # not sure why we have SUSPENDED, as it can never be set if new_status == SUSPENDED: new_status = PENDING if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None: return if new_status == FAILED and task.can_disable(): task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def prune(self, task, config, assistants): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay # Re-enable task after the disable time expires if 
task.status == DISABLED and task.scheduler_disable_time: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
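# The functools.partial below pre-binds the config-driven disable settings, so every Task # this scheduler creates shares them. For example (illustrative call), # self._make_task(task_id='A(x=1)', status=PENDING, deps=None) # is equivalent to # Task(task_id='A(x=1)', status=PENDING, deps=None, # disable_failures=self._config.disable_failures, # disable_window=self._config.disable_window)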
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_window=self._config.disable_window) def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): if self._state.prune(task, self._config, assistant_ids): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None): """ Keep track of when the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, worker, task_id, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ self.update(worker) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if not (task.status == RUNNING and status == PENDING): # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was an actual change, to prevent noise. # We also check for status == PENDING b/c that's the default value # (so checking for status != task.status would lie) self._update_task_history(task_id, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = time.time() + self._config.retry_delay if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources # only assistants should normally schedule tasks as FAILED and not runnable if runnable or status != FAILED: task.stakeholders.add(worker) # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker) self._update_priority(task, priority, worker) if runnable: task.workers.add(worker) if expl is not None: task.expl = expl def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self): """ Return worker's rank function for task scheduling. :return: """ dependents = collections.defaultdict(int) def not_done(t): task = self._state.get_task(t, default=None) return task is None or task.status != DONE for task in self._state.get_pending_tasks(): if task.status != DONE: deps = list(filter(not_done, task.deps)) inverse_num_deps = 1.0 / max(len(deps), 1) for dep in deps: dependents[dep] += inverse_num_deps return lambda task: (task.priority, dependents[task.id], -task.time) def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def get_work(self, worker, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for # Return remaining tasks that have no FAILED descendents self.update(worker, {'host': host}) if assistant: self.add_worker(worker, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] used_resources = self._used_resources() greedy_resources = collections.defaultdict(int) n_unique_pending = 0 greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in self._state.get_active_workers()) tasks = list(self._state.get_pending_tasks()) tasks.sort(key=self._rank(), reverse=True) for task in tasks: in_workers = (assistant and task.workers) or worker in task.workers if task.status == 'RUNNING' and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker best_task.time_running = time.time() self._update_task_history(best_task.id, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, worker, **kwargs): self.update(worker) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) 
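# UPSTREAM_SEVERITY_KEY is UPSTREAM_SEVERITY_ORDER.index, so max() picks the most # severe status seen among the deps. For example, # max(['', UPSTREAM_RUNNING, UPSTREAM_FAILED], key=UPSTREAM_SEVERITY_KEY) # evaluates to UPSTREAM_FAILED, since the ordering ranks # '' < RUNNING < MISSING_INPUT < FAILED < DISABLED.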
upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'deps': list(task.deps), 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status for task in self._state.get_active_tasks(status): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() 
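# Note: every node popped from the stack triggers a full scan over the active tasks, # so this traversal is O(stack nodes x active tasks); that is acceptable for the # on-demand visualiser queries it serves, but it would not suit a hot path.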
for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task_id, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task_id, successful) elif status == PENDING: self._task_history.task_scheduled(task_id) elif status == RUNNING: self._task_history.task_started(task_id, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
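# A minimal usage sketch of the scheduler API defined above, kept in comments so it # does not alter the module. Illustrative only: the worker name and task ids are # made up, and it assumes default configuration. # # sched = CentralPlannerScheduler() # sched.add_task(worker='w1', task_id='B()') # sched.add_task(worker='w1', task_id='A()', deps=['B()']) # response = sched.get_work(worker='w1', host='localhost') # assert response['task_id'] == 'B()' # B has no unmet deps, so it is handed out first # sched.add_task(worker='w1', task_id='B()', status=DONE) # assert sched.get_work(worker='w1')['task_id'] == 'A()' # A is now schedulable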
BugsInPy/BugsInPy/temp/projects/luigi/bug-31-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-31-buggy/luigi/luigi/scheduler.py
luigi-bug-11
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections import inspect try: import cPickle as pickle except ImportError: import pickle import functools import hashlib import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \ BATCH_RUNNING from luigi.task import Config logger = logging.getLogger(__name__) UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') RPC_METHODS = {} _retry_policy_fields = [ "retry_count", "disable_hard_timeout", "disable_window", ] RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields) def _get_empty_retry_policy(): return RetryPolicy(*[None] * len(_retry_policy_fields)) def rpc_method(**request_args): def _rpc_method(fn): # If request args are passed, return this function again for use as # the decorator function with the request args attached. fn_args = inspect.getargspec(fn) assert not fn_args.varargs assert fn_args.args[0] == 'self' all_args = fn_args.args[1:] defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ()))) required_args = frozenset(arg for arg in all_args if arg not in defaults) fn_name = fn.__name__ @functools.wraps(fn) def rpc_func(self, *args, **kwargs): actual_args = defaults.copy() actual_args.update(dict(zip(all_args, args))) actual_args.update(kwargs) if not all(arg in actual_args for arg in required_args): raise TypeError('{} takes {} arguments ({} given)'.format( fn_name, len(all_args), len(actual_args))) return self._request('/api/{}'.format(fn_name), actual_args, **request_args) RPC_METHODS[fn_name] = rpc_func return fn return _rpc_method class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. 
We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than retry_count failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) retry_count = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable_failures')) disable_hard_timeout = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def _get_retry_policy(self): return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, tracking_url=None, status_message=None, retry_policy='notoptional'): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.retry_policy = retry_policy self.failures = Failures(self.retry_policy.disable_window) self.tracking_url = tracking_url self.status_message = status_message self.scheduler_disable_time = None self.runnable = False self.batchable = False self.batch_id = None def __repr__(self): return "Task(%r)" % vars(self) # TODO(2017-08-10) replace this function with direct calls to batchable # this only exists for backward compatibility def is_batchable(self): try: return self.batchable except AttributeError: return False def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if self.failures.first_failure_time is not None: if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout): return True logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count) if self.failures.num_failures() >= self.retry_policy.retry_count: logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count) return True return False @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items()) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. 
""" if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object self._task_batchers = {} def get_state(self): return self._tasks, self._active_workers, self._task_batchers def set_state(self, state): self._tasks, self._active_workers = state[:2] if len(state) >= 3: self._task_batchers = state[2] def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from empty state.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with empty state", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_batch_running_tasks(self, batch_id): assert batch_id is not None return [ task for task in self.get_active_tasks(BATCH_RUNNING) if task.batch_id == batch_id ] def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def set_batcher(self, worker_id, family, batcher_args, max_batch_size): self._task_batchers.setdefault(worker_id, {}) self._task_batchers[worker_id][family] = (batcher_args, max_batch_size) def get_batcher(self, worker_id, family): return self._task_batchers.get(worker_id, {}).get(family, (None, 1)) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_batch_running(self, task, batch_id, worker_id): self.set_status(task, BATCH_RUNNING) task.batch_id = batch_id task.worker_running = worker_id def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING): return remove_on_failure = task.batch_id is not None and not task.batchable if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if task.status == RUNNING and task.batch_id is not None: for batch_task in self.get_batch_running_tasks(task.batch_id): self.set_status(batch_task, new_status, config) batch_task.batch_id = None task.batch_id = None if new_status == FAILED and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=task.retry_policy.retry_count, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() if new_status == FAILED: task.retry = time.time() + config.retry_delay if remove_on_failure: task.remove = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING): # We don't check for the RUNNING case, because that is already handled # by the fail_dead_worker_task function. 
logger.debug("Task %r has no stakeholders anymore -> might remove " "task in %s seconds", task.id, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() >= task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers.difference_update(workers) def disable_workers(self, workers): self._remove_workers_from_tasks(workers, remove_stakeholders=False) for worker in workers: self.get_worker(worker).disabled = True class Scheduler(object): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
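# Retry policies: _make_task below seeds every Task with the scheduler-wide # RetryPolicy(retry_count, disable_hard_timeout, disable_window) from config; # add_task later replaces it via _generate_retry_policy, which starts from the # config values and merges in any non-None per-task overrides. For example # (illustrative), retry_policy_dict={'retry_count': 3} yields a policy with # retry_count=3 and the configured disable_hard_timeout / disable_window.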
self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy()) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() @rpc_method() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task): logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return not getattr(worker, 'disabled', False) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) @rpc_method() def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')): self._state.set_batcher(worker, task_family, batched_args, max_batch_size) @rpc_method() def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, worker=None, batchable=None, batch_id=None, retry_policy_dict={}, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ assert worker is not None worker_id = worker worker_enabled = self.update(worker_id) retry_policy = self._generate_retry_policy(retry_policy_dict) if worker_enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker_enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if batch_id is not None: task.batch_id = batch_id if status == RUNNING and not task.worker_running: task.worker_running = worker_id if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.tracking_url = tracking_url if batchable is not None: 
            task.batchable = batchable
        if task.remove is not None:
            task.remove = None  # unmark task for removal so it isn't removed after being added
        if expl is not None:
            task.expl = expl
            if task.batch_id is not None:
                for batch_task in self._state.get_batch_running_tasks(task.batch_id):
                    batch_task.expl = expl
        if not (task.status in (RUNNING, BATCH_RUNNING) and status == PENDING) or new_deps:
            # don't allow re-scheduling of task while it is running, it must either fail or succeed first
            if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)

        if deps is not None:
            task.deps = set(deps)

        if new_deps is not None:
            task.deps.update(new_deps)

        if resources is not None:
            task.resources = resources

        if worker_enabled and not assistant:
            task.stakeholders.add(worker_id)

            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)

        self._update_priority(task, priority, worker_id)

        # Because some tasks (non-dynamic dependencies) are `_make_task`ed
        # before we know their retry_policy, we always set it here
        task.retry_policy = retry_policy

        if runnable and status != FAILED and worker_enabled:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable

    @rpc_method()
    def add_worker(self, worker, info, **kwargs):
        self._state.get_worker(worker).add_info(info)

    @rpc_method()
    def disable_worker(self, worker):
        self._state.disable_workers({worker})

    @rpc_method()
    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)

    def _generate_retry_policy(self, task_retry_policy_dict):
        retry_policy_dict = self._config._get_retry_policy()._asdict()
        retry_policy_dict.update({k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None})
        return RetryPolicy(**retry_policy_dict)

    def _has_resources(self, needed_resources, used_resources):
        if needed_resources is None:
            return True

        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] > available_resources.get(resource, 1):
                return False
        return True

    def _used_resources(self):
        used_resources = collections.defaultdict(int)
        if self._resources is not None:
            for task in self._state.get_active_tasks(status=RUNNING):
                if task.resources:
                    for resource, amount in six.iteritems(task.resources):
                        used_resources[resource] += amount
        return used_resources

    def _rank(self, task):
        """
        Return worker's rank function for task scheduling.
:return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _reset_orphaned_batch_running_tasks(self, worker_id): running_batch_ids = { task.batch_id for task in self._state.get_running_tasks() if task.worker_running == worker_id } orphaned_tasks = [ task for task in self._state.get_active_tasks(BATCH_RUNNING) if task.worker_running == worker_id and task.batch_id not in running_batch_ids ] for task in orphaned_tasks: self._state.set_status(task, PENDING) @rpc_method(allow_null=False) def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. # TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() assert worker is not None worker_id = worker # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1 best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task if current_tasks is not None: # batch running tasks that weren't claimed since the last get_work go back in the pool self._reset_orphaned_batch_running_tasks(worker_id) locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: upstream_status = self._upstream_status(task.id, upstream_table) if upstream_status != UPSTREAM_DISABLED: 
locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if (best_task and batched_params and task.family == best_task.family and len(batched_tasks) < max_batch_size and task.is_batchable() and all( task.params.get(name) == value for name, value in unbatched_params.items())): for name, params in batched_params.items(): params.append(task.params.get(name)) batched_tasks.append(task) if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task batch_param_names, max_batch_size = self._state.get_batcher( worker_id, task.family) if batch_param_names and task.is_batchable(): try: batched_params = { name: [task.params[name]] for name in batch_param_names } unbatched_params = { name: value for name, value in task.params.items() if name not in batched_params } batched_tasks.append(task) except KeyError: batched_params, unbatched_params = None, None else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if len(batched_tasks) > 1: batch_string = '|'.join(task.id for task in batched_tasks) batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest() for task in batched_tasks: self._state.set_batch_running(task, batch_id, worker_id) combined_params = best_task.params.copy() combined_params.update(batched_params) reply['task_id'] = None reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = combined_params reply['batch_id'] = batch_id reply['batch_task_ids'] = [task.id for task in batched_tasks] elif best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply @rpc_method(attempts=1) def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously 
calculated child elements upstream_severities = list(upstream_status_table.get(a_task_id) for a_task_id in dep.deps if a_task_id in upstream_status_table) or [''] status = min(upstream_severities, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': getattr(task, "status_message", None) } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret @rpc_method() def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
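                # Fall back to parsing the id itself: for a task_id of the form
                # 'MyTask(param=1)', TASK_FAMILY_RE recovers the family 'MyTask'.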
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized @rpc_method() def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) @rpc_method() def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) @rpc_method() def task_list(self, status='', upstream_status='', limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id @rpc_method() def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers @rpc_method() def resource_list(self): """ Resources usage info and their consumers (tasks). 
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret @rpc_method() def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result @rpc_method() def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized @rpc_method() def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} @rpc_method() def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message if task.status == RUNNING and task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.status_message = status_message @rpc_method() def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""

import collections
import inspect

try:
    import cPickle as pickle
except ImportError:
    import pickle
import functools
import hashlib
import itertools
import logging
import os
import re
import time

from luigi import six

from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \
    BATCH_RUNNING
from luigi.task import Config

logger = logging.getLogger(__name__)

UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'

UPSTREAM_SEVERITY_ORDER = (
    '',
    UPSTREAM_RUNNING,
    UPSTREAM_MISSING_INPUT,
    UPSTREAM_FAILED,
    UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
    FAILED: UPSTREAM_FAILED,
    RUNNING: UPSTREAM_RUNNING,
    PENDING: UPSTREAM_MISSING_INPUT,
    DISABLED: UPSTREAM_DISABLED,
}
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')

RPC_METHODS = {}

_retry_policy_fields = [
    "retry_count",
    "disable_hard_timeout",
    "disable_window",
]
RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields)


def _get_empty_retry_policy():
    return RetryPolicy(*[None] * len(_retry_policy_fields))


def rpc_method(**request_args):
    def _rpc_method(fn):
        # If request args are passed, return this function again for use as
        # the decorator function with the request args attached.
        fn_args = inspect.getargspec(fn)

        assert not fn_args.varargs
        assert fn_args.args[0] == 'self'
        all_args = fn_args.args[1:]
        defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ())))
        required_args = frozenset(arg for arg in all_args if arg not in defaults)
        fn_name = fn.__name__

        @functools.wraps(fn)
        def rpc_func(self, *args, **kwargs):
            actual_args = defaults.copy()
            actual_args.update(dict(zip(all_args, args)))
            actual_args.update(kwargs)
            if not all(arg in actual_args for arg in required_args):
                raise TypeError('{} takes {} arguments ({} given)'.format(
                    fn_name, len(all_args), len(actual_args)))
            return self._request('/api/{}'.format(fn_name), actual_args, **request_args)

        RPC_METHODS[fn_name] = rpc_func
        return fn

    return _rpc_method


class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We
    # should drop the compatibility at some point
    retry_delay = parameter.FloatParameter(default=900.0)
    remove_delay = parameter.FloatParameter(default=600.0)
    worker_disconnect_delay = parameter.FloatParameter(default=60.0)
    state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')

    # Jobs are disabled if we see more than retry_count failures in disable_window seconds.
    # These disables last for disable_persist seconds.
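    # Example (hypothetical values, using the config keys declared just below):
    #
    #   [scheduler]
    #   disable_failures = 5
    #   disable-window-seconds = 3600
    #   disable-persist-seconds = 86400
    #
    # With these settings a task that fails 5 times within one hour is moved to
    # DISABLED and only becomes schedulable again after 24 hours.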
disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) retry_count = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable_failures')) disable_hard_timeout = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def _get_retry_policy(self): return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, tracking_url=None, status_message=None, retry_policy='notoptional'): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active)
        self.workers = set()  # worker ids that can perform task - task is 'BROKEN' if none of these workers are active
        if deps is None:
            self.deps = set()
        else:
            self.deps = set(deps)
        self.status = status  # PENDING, RUNNING, FAILED or DONE
        self.time = time.time()  # Timestamp when task was first added
        self.updated = self.time
        self.retry = None
        self.remove = None
        self.worker_running = None  # the worker id that is currently running the task or None
        self.time_running = None  # Timestamp when picked up by worker
        self.expl = None
        self.priority = priority
        self.resources = _get_default(resources, {})
        self.family = family
        self.module = module
        self.params = _get_default(params, {})
        self.retry_policy = retry_policy
        self.failures = Failures(self.retry_policy.disable_window)
        self.tracking_url = tracking_url
        self.status_message = status_message
        self.scheduler_disable_time = None
        self.runnable = False
        self.batchable = False
        self.batch_id = None

    def __repr__(self):
        return "Task(%r)" % vars(self)

    # TODO(2017-08-10) replace this function with direct calls to batchable
    # this only exists for backward compatibility
    def is_batchable(self):
        try:
            return self.batchable
        except AttributeError:
            return False

    def add_failure(self):
        self.failures.add_failure()

    def has_excessive_failures(self):
        if self.failures.first_failure_time is not None:
            if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout):
                return True

        logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count)
        if self.failures.num_failures() >= self.retry_policy.retry_count:
            logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count)
            return True

        return False

    @property
    def pretty_id(self):
        param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items())
        return '{}({})'.format(self.family, param_str)


class Worker(object):
    """
    Structure for tracking worker activity and keeping a reference to each worker.
    """

    def __init__(self, worker_id, last_active=None):
        self.id = worker_id
        self.reference = None  # reference to the worker in the real world. (Currently a dict containing just the host)
        self.last_active = last_active or time.time()  # seconds since epoch
        self.last_get_work = None
        self.started = time.time()  # seconds since epoch
        self.tasks = set()  # task objects
        self.info = {}
        self.disabled = False

    def add_info(self, info):
        self.info.update(info)

    def update(self, worker_reference, get_work=False):
        if worker_reference:
            self.reference = worker_reference
        self.last_active = time.time()
        if get_work:
            self.last_get_work = time.time()

    def prune(self, config):
        # Delete workers that haven't said anything for a while (probably killed)
        if self.last_active + config.worker_disconnect_delay < time.time():
            return True

    def get_pending_tasks(self, state):
        """
        Get PENDING (and RUNNING) tasks for this worker.

        You have to pass in the state for optimization reasons.
        """
        if len(self.tasks) < state.num_pending_tasks():
            return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
                                    self.tasks)
        else:
            return state.get_pending_tasks()

    def is_trivial_worker(self, state):
        """
        Return True if this worker is not an assistant and none of its pending
        tasks require resources.

        We have to pass the state parameter for optimization reasons.
""" if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object self._task_batchers = {} def get_state(self): return self._tasks, self._active_workers, self._task_batchers def set_state(self, state): self._tasks, self._active_workers = state[:2] if len(state) >= 3: self._task_batchers = state[2] def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from empty state.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with empty state", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_batch_running_tasks(self, batch_id): assert batch_id is not None return [ task for task in self.get_active_tasks(BATCH_RUNNING) if task.batch_id == batch_id ] def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def set_batcher(self, worker_id, family, batcher_args, max_batch_size): self._task_batchers.setdefault(worker_id, {}) self._task_batchers[worker_id][family] = (batcher_args, max_batch_size) def get_batcher(self, worker_id, family): return self._task_batchers.get(worker_id, {}).get(family, (None, 1)) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_batch_running(self, task, batch_id, worker_id): self.set_status(task, BATCH_RUNNING) task.batch_id = batch_id task.worker_running = worker_id def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING): return remove_on_failure = task.batch_id is not None and not task.batchable if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if task.status == RUNNING and task.batch_id is not None: for batch_task in self.get_batch_running_tasks(task.batch_id): self.set_status(batch_task, new_status, config) batch_task.batch_id = None task.batch_id = None if new_status == FAILED and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=task.retry_policy.retry_count, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() if new_status == FAILED: task.retry = time.time() + config.retry_delay if remove_on_failure: task.remove = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING): # We don't check for the RUNNING case, because that is already handled # by the fail_dead_worker_task function. 
logger.debug("Task %r has no stakeholders anymore -> might remove " "task in %s seconds", task.id, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() >= task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers.difference_update(workers) def disable_workers(self, workers): self._remove_workers_from_tasks(workers, remove_stakeholders=False) for worker in workers: self.get_worker(worker).disabled = True class Scheduler(object): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy()) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() @rpc_method() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task): logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return not getattr(worker, 'disabled', False) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) @rpc_method() def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')): self._state.set_batcher(worker, task_family, batched_args, max_batch_size) @rpc_method() def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, worker=None, batchable=None, batch_id=None, retry_policy_dict={}, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ assert worker is not None worker_id = worker worker_enabled = self.update(worker_id) retry_policy = self._generate_retry_policy(retry_policy_dict) if worker_enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker_enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if batch_id is not None: task.batch_id = batch_id if status == RUNNING and not task.worker_running: task.worker_running = worker_id if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.tracking_url = tracking_url if batchable is not None: 
            task.batchable = batchable
        if task.remove is not None:
            task.remove = None  # unmark task for removal so it isn't removed after being added
        if expl is not None:
            task.expl = expl
            if task.batch_id is not None:
                for batch_task in self._state.get_batch_running_tasks(task.batch_id):
                    batch_task.expl = expl
        if not (task.status in (RUNNING, BATCH_RUNNING) and status == PENDING) or new_deps:
            # don't allow re-scheduling of task while it is running, it must either fail or succeed first
            if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)

        if deps is not None:
            task.deps = set(deps)

        if new_deps is not None:
            task.deps.update(new_deps)

        if resources is not None:
            task.resources = resources

        if worker_enabled and not assistant:
            task.stakeholders.add(worker_id)

            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)

        self._update_priority(task, priority, worker_id)

        # Because some tasks (non-dynamic dependencies) are `_make_task`ed
        # before we know their retry_policy, we always set it here
        task.retry_policy = retry_policy

        if runnable and status != FAILED and worker_enabled:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable

    @rpc_method()
    def add_worker(self, worker, info, **kwargs):
        self._state.get_worker(worker).add_info(info)

    @rpc_method()
    def disable_worker(self, worker):
        self._state.disable_workers({worker})

    @rpc_method()
    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)

    def _generate_retry_policy(self, task_retry_policy_dict):
        retry_policy_dict = self._config._get_retry_policy()._asdict()
        retry_policy_dict.update({k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None})
        return RetryPolicy(**retry_policy_dict)

    def _has_resources(self, needed_resources, used_resources):
        if needed_resources is None:
            return True

        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] > available_resources.get(resource, 1):
                return False
        return True

    def _used_resources(self):
        used_resources = collections.defaultdict(int)
        if self._resources is not None:
            for task in self._state.get_active_tasks(status=RUNNING):
                if task.resources:
                    for resource, amount in six.iteritems(task.resources):
                        used_resources[resource] += amount
        return used_resources

    def _rank(self, task):
        """
        Return worker's rank function for task scheduling.
:return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _reset_orphaned_batch_running_tasks(self, worker_id): running_batch_ids = { task.batch_id for task in self._state.get_running_tasks() if task.worker_running == worker_id } orphaned_tasks = [ task for task in self._state.get_active_tasks(BATCH_RUNNING) if task.worker_running == worker_id and task.batch_id not in running_batch_ids ] for task in orphaned_tasks: self._state.set_status(task, PENDING) @rpc_method(allow_null=False) def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. # TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() assert worker is not None worker_id = worker # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1 best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task if current_tasks is not None: # batch running tasks that weren't claimed since the last get_work go back in the pool self._reset_orphaned_batch_running_tasks(worker_id) locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: upstream_status = self._upstream_status(task.id, upstream_table) if upstream_status != UPSTREAM_DISABLED: 
locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if (best_task and batched_params and task.family == best_task.family and len(batched_tasks) < max_batch_size and task.is_batchable() and all( task.params.get(name) == value for name, value in unbatched_params.items()) and self._schedulable(task)): for name, params in batched_params.items(): params.append(task.params.get(name)) batched_tasks.append(task) if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task batch_param_names, max_batch_size = self._state.get_batcher( worker_id, task.family) if batch_param_names and task.is_batchable(): try: batched_params = { name: [task.params[name]] for name in batch_param_names } unbatched_params = { name: value for name, value in task.params.items() if name not in batched_params } batched_tasks.append(task) except KeyError: batched_params, unbatched_params = None, None else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if len(batched_tasks) > 1: batch_string = '|'.join(task.id for task in batched_tasks) batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest() for task in batched_tasks: self._state.set_batch_running(task, batch_id, worker_id) combined_params = best_task.params.copy() combined_params.update(batched_params) reply['task_id'] = None reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = combined_params reply['batch_id'] = batch_id reply['batch_task_ids'] = [task.id for task in batched_tasks] elif best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply @rpc_method(attempts=1) def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status 
based on the previously calculated child elements upstream_severities = list(upstream_status_table.get(a_task_id) for a_task_id in dep.deps if a_task_id in upstream_status_table) or [''] status = min(upstream_severities, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': getattr(task, "status_message", None) } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret @rpc_method() def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
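                # Fall back to parsing the id itself: for a task_id of the form
                # 'MyTask(param=1)', TASK_FAMILY_RE recovers the family 'MyTask'.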
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized @rpc_method() def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) @rpc_method() def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) @rpc_method() def task_list(self, status='', upstream_status='', limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id @rpc_method() def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers @rpc_method() def resource_list(self): """ Resources usage info and their consumers (tasks). 
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret @rpc_method() def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result @rpc_method() def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized @rpc_method() def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} @rpc_method() def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message if task.status == RUNNING and task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.status_message = status_message @rpc_method() def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
BugsInPy/BugsInPy/temp/projects/luigi/bug-11-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-11-buggy/luigi/luigi/scheduler.py
luigi-bug-6
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Parameters are one of the core concepts of Luigi. All Parameters sit on :class:`~luigi.task.Task` classes. See :ref:`Parameter` for more info on how to define parameters. ''' import abc import datetime import warnings import json from json import JSONEncoder from collections import OrderedDict, Mapping import operator import functools from ast import literal_eval try: from ConfigParser import NoOptionError, NoSectionError except ImportError: from configparser import NoOptionError, NoSectionError from luigi import task_register from luigi import six from luigi import configuration from luigi.cmdline_parser import CmdlineParser _no_value = object() class ParameterException(Exception): """ Base exception. """ pass class MissingParameterException(ParameterException): """ Exception signifying that there was a missing Parameter. """ pass class UnknownParameterException(ParameterException): """ Exception signifying that an unknown Parameter was supplied. """ pass class DuplicateParameterException(ParameterException): """ Exception signifying that a Parameter was specified multiple times. """ pass class Parameter(object): """ Parameter whose value is a ``str``, and a base class for other parameter types. Parameters are objects set on the Task class level to make it possible to parameterize tasks. For instance: .. code:: python class MyTask(luigi.Task): foo = luigi.Parameter() class RequiringTask(luigi.Task): def requires(self): return MyTask(foo="hello") def run(self): print(self.requires().foo) # prints "hello" This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately. When a task is instantiated, it will first use any argument as the value of the parameter, eg. if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the value will be resolved in this order of falling priority: * Any value provided on the command line: - To the root task (eg. ``--param xyz``) - Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``). * With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion` * Any default value set using the ``default`` flag. Parameter objects may be reused, but you must then set the ``positional=False`` flag. """ _counter = 0 # non-atomically increasing counter used for ordering parameters. def __init__(self, default=_no_value, is_global=False, significant=True, description=None, config_path=None, positional=True, always_in_help=False, batch_method=None): """ :param default: the default value for this parameter. This should match the type of the Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for ``IntParameter``. By default, no default is stored and the value must be specified at runtime. 
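# ---------------------------------------------------------------------------
# Illustrative sketch (not luigi code): the falling-priority resolution
# described above.  _get_value walks the candidates in order -- command line,
# [TaskName] config section, deprecated config_path, default -- and the first
# candidate that is not the _no_value sentinel wins.  The names below are
# invented for this example.
_NO_VALUE = object()  # stand-in for luigi's private _no_value sentinel

def resolve(*candidates):
    for value in candidates:
        if value is not _NO_VALUE:
            return value
    return _NO_VALUE

# resolve(_NO_VALUE, 'from-config', 'default')  ->  'from-config'
# ---------------------------------------------------------------------------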
:param bool significant: specify ``False`` if the parameter should not be treated as part of the unique identifier for a Task. An insignificant Parameter might also be used to specify a password or other sensitive information that should not be made public via the scheduler. Default: ``True``. :param str description: A human-readable string describing the purpose of this Parameter. For command-line invocations, this will be used as the `help` string shown to users. Default: ``None``. :param dict config_path: a dictionary with entries ``section`` and ``name`` specifying a config file entry from which to read the default value for this parameter. DEPRECATED. Default: ``None``. :param bool positional: If true, you can set the argument as a positional argument. It's true by default but we recommend ``positional=False`` for abstract base classes and similar cases. :param bool always_in_help: For the --help option in the command line parsing. Set true to always show in --help. :param function(iterable[A])->A batch_method: Method to combine an iterable of parsed parameter values into a single value. Used when receiving batched parameter lists from the scheduler. See :ref:`batch_method` """ self._default = default self._batch_method = batch_method if is_global: warnings.warn("is_global support is removed. Assuming positional=False", DeprecationWarning, stacklevel=2) positional = False self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks self.positional = positional self.description = description self.always_in_help = always_in_help if config_path is not None and ('section' not in config_path or 'name' not in config_path): raise ParameterException('config_path must be a hash containing entries for section and name') self._config_path = config_path self._counter = Parameter._counter # We need to keep track of this to get the order right (see Task class) Parameter._counter += 1 def _get_value_from_config(self, section, name): """Loads the default from the config. Returns _no_value if it doesn't exist""" conf = configuration.get_config() try: value = conf.get(section, name) except (NoSectionError, NoOptionError): return _no_value return self.parse(value) def _get_value(self, task_name, param_name): for value, warn in self._value_iterator(task_name, param_name): if value != _no_value: if warn: warnings.warn(warn, DeprecationWarning) return value return _no_value def _value_iterator(self, task_name, param_name): """ Yield the parameter values, with optional deprecation warning as second tuple value. The parameter value will be whatever non-_no_value that is yielded first. """ cp_parser = CmdlineParser.get_instance() if cp_parser: dest = self._parser_global_dest(param_name, task_name) found = getattr(cp_parser.known_args, dest, None) yield (self._parse_or_no_value(found), None) yield (self._get_value_from_config(task_name, param_name), None) yield (self._get_value_from_config(task_name, param_name.replace('_', '-')), 'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format( task_name, param_name)) if self._config_path: yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']), 'The use of the configuration [{}] {} is deprecated. 
Please use [{}] {}'.format( self._config_path['section'], self._config_path['name'], task_name, param_name)) yield (self._default, None) def has_task_value(self, task_name, param_name): return self._get_value(task_name, param_name) != _no_value def task_value(self, task_name, param_name): value = self._get_value(task_name, param_name) if value == _no_value: raise MissingParameterException("No default specified") else: return self.normalize(value) def _is_batchable(self): return self._batch_method is not None def parse(self, x): """ Parse an individual value from the input. The default implementation is the identity function, but subclasses should override this method for specialized parsing. :param str x: the value to parse. :return: the parsed value. """ return x # default impl def _parse_list(self, xs): """ Parse a list of values from the scheduler. Only possible if this is_batchable() is True. This will combine the list into a single parameter value using batch method. This should never need to be overridden. :param xs: list of values to parse and combine :return: the combined parsed values """ if not self._is_batchable(): raise NotImplementedError('No batch method found') elif not xs: raise ValueError('Empty parameter list passed to parse_list') else: return self._batch_method(map(self.parse, xs)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return str(x) def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != Parameter: return if not isinstance(param_value, six.string_types): warnings.warn('Parameter "{}" with value "{}" is not of type string.'.format(param_name, param_value)) def normalize(self, x): """ Given a parsed parameter value, normalizes it. The value can either be the result of parse(), the default value or arguments passed into the task's constructor by instantiation. This is very implementation defined, but can be used to validate/clamp valid values. For example, if you wanted to only accept even integers, and "correct" odd values to the nearest integer, you can implement normalize as ``x // 2 * 2``. """ return x # default impl def next_in_enumeration(self, _value): """ If your Parameter type has an enumerable ordering of values. You can choose to override this method. This method is used by the :py:mod:`luigi.execution_summary` module for pretty printing purposes. Enabling it to pretty print tasks like ``MyTask(num=1), MyTask(num=2), MyTask(num=3)`` to ``MyTask(num=1..3)``. :param value: The value :return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering. """ return None def _parse_or_no_value(self, x): if not x: return _no_value else: return self.parse(x) @staticmethod def _parser_global_dest(param_name, task_name): return task_name + '_' + param_name @staticmethod def _parser_action(): return "store" _UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0) class _DateParameterBase(Parameter): """ Base class Parameter for date (not datetime). """ def __init__(self, interval=1, start=None, **kwargs): super(_DateParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH.date() @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. """ pass def parse(self, s): """ Parses a date string formatted like ``YYYY-MM-DD``. 
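# ---------------------------------------------------------------------------
# Illustrative sketch (not luigi code): _parse_list above parses each string
# the scheduler sends for a batched parameter and folds the results with the
# user-supplied batch_method.  parse_list is an invented name; for instance,
# an IntParameter(batch_method=max) would run all queued values as one task.
def parse_list(xs, parse, batch_method):
    if batch_method is None:
        raise NotImplementedError('No batch method found')
    if not xs:
        raise ValueError('Empty parameter list passed to parse_list')
    return batch_method(map(parse, xs))

# parse_list(['3', '1', '2'], parse=int, batch_method=max)  ->  3
# ---------------------------------------------------------------------------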
""" return datetime.datetime.strptime(s, self.date_format).date() def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) class DateParameter(_DateParameterBase): """ Parameter whose value is a :py:class:`~datetime.date`. A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies July 10, 2013. DateParameters are 90% of the time used to be interpolated into file system paths or the like. Here is a gentle reminder of how to interpolate date parameters into strings: .. code:: python class MyTask(luigi.Task): date = luigi.DateParameter() def run(self): templated_path = "/my/path/to/my/dataset/{date:%Y/%m/%d}/" instantiated_path = templated_path.format(date=self.date) # print(instantiated_path) --> /my/path/to/my/dataset/2016/06/09/ # ... use instantiated_path ... To set this parameter to default to the current day. You can write code like this: .. code:: python import datetime class MyTask(luigi.Task): date = luigi.DateParameter(default=datetime.date.today()) """ date_format = '%Y-%m-%d' def next_in_enumeration(self, value): return value + datetime.timedelta(days=self.interval) def normalize(self, value): if value is None: return None if isinstance(value, datetime.datetime): value = value.date() delta = (value - self.start).days % self.interval return value - datetime.timedelta(days=delta) class MonthParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the month (day of :py:class:`~datetime.date` is "rounded" to first of the month). A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies July of 2013. """ date_format = '%Y-%m' def _add_months(self, date, months): """ Add ``months`` months to ``date``. Unfortunately we can't use timedeltas to add months because timedelta counts in days and there's no foolproof way to add N months in days without counting the number of days per month. """ year = date.year + (date.month + months - 1) // 12 month = (date.month + months - 1) % 12 + 1 return datetime.date(year=year, month=month, day=1) def next_in_enumeration(self, value): return self._add_months(value, self.interval) def normalize(self, value): if value is None: return None months_since_start = (value.year - self.start.year) * 12 + (value.month - self.start.month) months_since_start -= months_since_start % self.interval return self._add_months(self.start, months_since_start) class YearParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the year (day and month of :py:class:`~datetime.date` is "rounded" to first day of the year). A YearParameter is a Date string formatted ``YYYY``. """ date_format = '%Y' def next_in_enumeration(self, value): return value.replace(year=value.year + self.interval) def normalize(self, value): if value is None: return None delta = (value.year - self.start.year) % self.interval return datetime.date(year=value.year - delta, month=1, day=1) class _DatetimeParameterBase(Parameter): """ Base class Parameter for datetime """ def __init__(self, interval=1, start=None, **kwargs): super(_DatetimeParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. 
""" pass @abc.abstractproperty def _timedelta(self): """ How to move one interval of this type forward (i.e. not counting self.interval). """ pass def parse(self, s): """ Parses a string to a :py:class:`~datetime.datetime`. """ return datetime.datetime.strptime(s, self.date_format) def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) def normalize(self, dt): """ Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at :py:attr:`~_DatetimeParameterBase.start`. """ if dt is None: return None dt = dt.replace(microsecond=0) # remove microseconds, to avoid float rounding issues. delta = (dt - self.start).total_seconds() granularity = (self._timedelta * self.interval).total_seconds() return dt - datetime.timedelta(seconds=delta % granularity) def next_in_enumeration(self, value): return value + self._timedelta * self.interval class DateHourParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour. A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at 19:00. """ date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T' _timedelta = datetime.timedelta(hours=1) class DateMinuteParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute. A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at 19:07. The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute. """ date_format = '%Y-%m-%dT%H%M' _timedelta = datetime.timedelta(minutes=1) deprecated_date_format = '%Y-%m-%dT%HH%M' def parse(self, s): try: value = datetime.datetime.strptime(s, self.deprecated_date_format) warnings.warn( 'Using "H" between hours and minutes is deprecated, omit it instead.', DeprecationWarning, stacklevel=2 ) return value except ValueError: return super(DateMinuteParameter, self).parse(s) class DateSecondParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the second. A DateSecondParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the second. For example, ``2013-07-10T190738`` specifies July 10, 2013 at 19:07:38. The interval parameter can be used to clamp this parameter to every N seconds, instead of every second. """ date_format = '%Y-%m-%dT%H%M%S' _timedelta = datetime.timedelta(seconds=1) class IntParameter(Parameter): """ Parameter whose value is an ``int``. """ def parse(self, s): """ Parses an ``int`` from the string using ``int()``. """ return int(s) def next_in_enumeration(self, value): return value + 1 class FloatParameter(Parameter): """ Parameter whose value is a ``float``. """ def parse(self, s): """ Parses a ``float`` from the string using ``float()``. """ return float(s) class BoolParameter(Parameter): """ A Parameter whose value is a ``bool``. This parameter have an implicit default value of ``False``. 
""" def __init__(self, *args, **kwargs): super(BoolParameter, self).__init__(*args, **kwargs) if self._default == _no_value: self._default = False def parse(self, s): """ Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. """ return {'true': True, 'false': False}[str(s).lower()] def normalize(self, value): # coerce anything truthy to True return bool(value) if value is not None else None @staticmethod def _parser_action(): return 'store_true' class DateIntervalParameter(Parameter): """ A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`. Date Intervals are specified using the ISO 8601 date notation for dates (eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks (eg. "2015-W35"). In addition, it also supports arbitrary date intervals provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04"). """ def parse(self, s): """ Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i raise ValueError('Invalid date interval - could not be parsed') class TimeDeltaParameter(Parameter): """ Class that maps to timedelta using strings in any of the following forms: * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h") Note: multiple arguments must be supplied in longest to shortest unit order * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported) * ISO 8601 duration ``PnW`` See https://en.wikipedia.org/wiki/ISO_8601#Durations """ def _apply_regex(self, regex, input): import re re_match = re.match(regex, input) if re_match and any(re_match.groups()): kwargs = {} has_val = False for k, v in six.iteritems(re_match.groupdict(default="0")): val = int(v) if val > -1: has_val = True kwargs[k] = val if has_val: return datetime.timedelta(**kwargs) def _parseIso8601(self, input): def field(key): return r"(?P<%s>\d+)%s" % (key, key[0].upper()) def optional_field(key): return "(%s)?" % field(key) # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta) regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]])) return self._apply_regex(regex, input) def _parseSimple(self, input): keys = ["weeks", "days", "hours", "minutes", "seconds"] # Give the digits a regex group name from the keys, then look for text with the first letter of the key, # optionally followed by the rest of the word, with final char (the "s") optional regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys]) return self._apply_regex(regex, input) def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. """ result = self._parseIso8601(input) if not result: result = self._parseSimple(input) if result is not None: return result else: raise ParameterException("Invalid time delta - could not parse %s" % input) def serialize(self, x): """ Converts datetime.timedelta to a string :param x: the value to serialize. 
""" weeks = x.days // 7 days = x.days % 7 hours = x.seconds // 3600 minutes = (x.seconds % 3600) // 60 seconds = (x.seconds % 3600) % 60 result = "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds) return result def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != TimeDeltaParameter: return if not isinstance(param_value, datetime.timedelta): warnings.warn('Parameter "{}" with value "{}" is not of type timedelta.'.format(param_name, param_value)) class TaskParameter(Parameter): """ A parameter that takes another luigi task class. When used programatically, the parameter should be specified directly with the :py:class:`luigi.task.Task` (sub) class. Like ``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line, you specify the :py:meth:`luigi.task.Task.get_task_family`. Like .. code-block:: console $ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module. When the :py:class:`luigi.task.Task` class is instantiated to an object. The value will always be a task class (and not a string). """ def parse(self, input): """ Parse a task_famly using the :class:`~luigi.task_register.Register` """ return task_register.Register.get_task_cls(input) def serialize(self, cls): """ Converts the :py:class:`luigi.task.Task` (sub) class to its family name. """ return cls.get_task_family() class EnumParameter(Parameter): """ A parameter whose value is an :class:`~enum.Enum`. In the task definition, use .. code-block:: python class Model(enum.Enum): Honda = 1 Volvo = 2 class MyTask(luigi.Task): my_param = luigi.EnumParameter(enum=Model) At the command line, use, .. code-block:: console $ luigi --module my_tasks MyTask --my-param Honda """ def __init__(self, *args, **kwargs): if 'enum' not in kwargs: raise ParameterException('An enum class must be specified.') self._enum = kwargs.pop('enum') super(EnumParameter, self).__init__(*args, **kwargs) def parse(self, s): try: return self._enum[s] except KeyError: raise ValueError('Invalid enum value - could not be parsed') def serialize(self, e): return e.name class _FrozenOrderedDict(Mapping): """ It is an immutable wrapper around ordered dictionaries that implements the complete :py:class:`collections.Mapping` interface. It can be used as a drop-in replacement for dictionaries where immutability and ordering are desired. """ def __init__(self, *args, **kwargs): self.__dict = OrderedDict(*args, **kwargs) self.__hash = None def __getitem__(self, key): return self.__dict[key] def __iter__(self): return iter(self.__dict) def __len__(self): return len(self.__dict) def __repr__(self): return '<FrozenOrderedDict %s>' % repr(self.__dict) def __hash__(self): if self.__hash is None: hashes = map(hash, self.items()) self.__hash = functools.reduce(operator.xor, hashes, 0) return self.__hash def get_wrapped(self): return self.__dict def _recursively_freeze(value): """ Recursively walks ``Mapping``s and ``list``s and converts them to ``_FrozenOrderedDict`` and ``tuples``, respectively. """ if isinstance(value, Mapping): return _FrozenOrderedDict(((k, _recursively_freeze(v)) for k, v in value.items())) elif isinstance(value, list): return tuple(_recursively_freeze(v) for v in value) return value class DictParameter(Parameter): """ Parameter whose value is a ``dict``. In the task definition, use .. 
code-block:: python class MyTask(luigi.Task): tags = luigi.DictParameter() def run(self): logging.info("Find server with role: %s", self.tags['role']) server = aws.ec2.find_my_resource(self.tags) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --tags <JSON string> Simple example with two tags: .. code-block:: console $ luigi --module my_tasks MyTask --tags '{"role": "web", "env": "staging"}' It can be used to define dynamic parameters, when you do not know the exact list of your parameters (e.g. list of tags, that are dynamically constructed outside Luigi), or you have a complex parameter containing logically related values (like a database connection config). """ class _DictParamEncoder(JSONEncoder): """ JSON encoder for :py:class:`~DictParameter`, which makes :py:class:`~_FrozenOrderedDict` JSON serializable. """ def default(self, obj): if isinstance(obj, _FrozenOrderedDict): return obj.get_wrapped() return json.JSONEncoder.default(self, obj) def normalize(self, value): """ Ensure that dictionary parameter is converted to a _FrozenOrderedDict so it can be hashed. """ return _recursively_freeze(value) def parse(self, s): """ Parses an immutable and ordered ``dict`` from a JSON string using standard JSON library. We need to use an immutable dictionary, to create a hashable parameter and also preserve the internal structure of parsing. The traversal order of standard ``dict`` is undefined, which can result various string representations of this parameter, and therefore a different task id for the task containing this parameter. This is because task id contains the hash of parameters' JSON representation. :param s: String to be parse """ return json.loads(s, object_pairs_hook=_FrozenOrderedDict) def serialize(self, x): return json.dumps(x, cls=DictParameter._DictParamEncoder) class ListParameter(Parameter): """ Parameter whose value is a ``list``. In the task definition, use .. code-block:: python class MyTask(luigi.Task): grades = luigi.ListParameter() def run(self): sum = 0 for element in self.grades: sum += element avg = sum / len(self.grades) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --grades <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --grades '[100,70]' """ def normalize(self, x): """ Ensure that list parameter is converted to a tuple so it can be hashed. :param str x: the value to parse. :return: the normalized (hashable/immutable) value. """ return _recursively_freeze(x) def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """ return list(json.loads(x)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return json.dumps(x) class TupleParameter(Parameter): """ Parameter whose value is a ``tuple`` or ``tuple`` of tuples. In the task definition, use .. code-block:: python class MyTask(luigi.Task): book_locations = luigi.TupleParameter() def run(self): for location in self.book_locations: print("Go to page %d, line %d" % (location[0], location[1])) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --book_locations <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --book_locations '((12,3),(4,15),(52,1))' """ def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. 
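# ---------------------------------------------------------------------------
# Illustrative sketch (not luigi code): why parse above pins the pair order.
# A task id includes a hash of the parameters' JSON form, so the same dict
# must always serialize to the same string.  collections.OrderedDict stands
# in here for luigi's private _FrozenOrderedDict.
import json
from collections import OrderedDict

def parse_dict(s):
    return json.loads(s, object_pairs_hook=OrderedDict)

d = parse_dict('{"role": "web", "env": "staging"}')
# json.dumps(d) == '{"role": "web", "env": "staging"}'  -- order preserved
# ---------------------------------------------------------------------------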
:return: the parsed value. """ # Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case. # A tuple string may come from a config file or from cli execution. # t = ((1, 2), (3, 4)) # t_str = '((1,2),(3,4))' # t_json_str = json.dumps(t) # t_json_str == '[[1, 2], [3, 4]]' # json.loads(t_json_str) == t # json.loads(t_str) == ValueError: No JSON object could be decoded # Therefore, if json.loads(x) returns a ValueError, try ast.literal_eval(x). # ast.literal_eval(t_str) == t try: return tuple(tuple(x) for x in json.loads(x)) # loop required to parse tuple of tuples except ValueError: return literal_eval(x) # if this causes an error, let that error be raised. def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return json.dumps(x) class NumericalParameter(Parameter): """ Parameter whose value is a number of the specified type, e.g. ``int`` or ``float`` and in the range specified. In the task definition, use .. code-block:: python class MyTask(luigi.Task): my_param_1 = luigi.NumericalParameter( var_type=int, min_value=-3, max_value=7) # -3 <= my_param_1 < 7 my_param_2 = luigi.NumericalParameter( var_type=int, min_value=-3, max_value=7, left_op=operator.lt, right_op=operator.le) # -3 < my_param_2 <= 7 At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --my-param-1 -3 --my-param-2 -2 """ def __init__(self, left_op=operator.le, right_op=operator.lt, *args, **kwargs): """ :param function var_type: The type of the input variable, e.g. int or float. :param min_value: The minimum value permissible in the accepted values range. May be inclusive or exclusive based on left_op parameter. This should be the same type as var_type. :param max_value: The maximum value permissible in the accepted values range. May be inclusive or exclusive based on right_op parameter. This should be the same type as var_type. :param function left_op: The comparison operator for the left-most comparison in the expression ``min_value left_op value right_op value``. This operator should generally be either ``operator.lt`` or ``operator.le``. Default: ``operator.le``. :param function right_op: The comparison operator for the right-most comparison in the expression ``min_value left_op value right_op value``. This operator should generally be either ``operator.lt`` or ``operator.le``. Default: ``operator.lt``. 
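# ---------------------------------------------------------------------------
# Illustrative sketch (not luigi code): the dual strategy the comment above
# describes -- try JSON first (handles the json.dumps spelling), fall back
# to ast.literal_eval for the plain tuple spelling from config or CLI.
import json
from ast import literal_eval

def parse_tuple(x):
    try:
        return tuple(tuple(inner) for inner in json.loads(x))
    except ValueError:
        return literal_eval(x)

# parse_tuple('[[1, 2], [3, 4]]')  ->  ((1, 2), (3, 4))   # json.dumps form
# parse_tuple('((1,2),(3,4))')     ->  ((1, 2), (3, 4))   # tuple-string form
# ---------------------------------------------------------------------------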
""" if "var_type" not in kwargs: raise ParameterException("var_type must be specified") self._var_type = kwargs.pop("var_type") if "min_value" not in kwargs: raise ParameterException("min_value must be specified") self._min_value = kwargs.pop("min_value") if "max_value" not in kwargs: raise ParameterException("max_value must be specified") self._max_value = kwargs.pop("max_value") self._left_op = left_op self._right_op = right_op self._permitted_range = ( "{var_type} in {left_endpoint}{min_value}, {max_value}{right_endpoint}".format( var_type=self._var_type.__name__, min_value=self._min_value, max_value=self._max_value, left_endpoint="[" if left_op == operator.le else "(", right_endpoint=")" if right_op == operator.lt else "]")) super(NumericalParameter, self).__init__(*args, **kwargs) if self.description: self.description += " " else: self.description = "" self.description += "permitted values: " + self._permitted_range def parse(self, s): value = self._var_type(s) if (self._left_op(self._min_value, value) and self._right_op(value, self._max_value)): return value else: raise ValueError( "{s} is not in the set of {permitted_range}".format( s=s, permitted_range=self._permitted_range)) class ChoiceParameter(Parameter): """ A parameter which takes two values: 1. an instance of :class:`~collections.Iterable` and 2. the class of the variables to convert to. In the task definition, use .. code-block:: python class MyTask(luigi.Task): my_param = luigi.ChoiceParameter(choices=[0.1, 0.2, 0.3], var_type=float) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --my-param 0.1 Consider using :class:`~luigi.EnumParameter` for a typed, structured alternative. This class can perform the same role when all choices are the same type and transparency of parameter value on the command line is desired. """ def __init__(self, var_type=str, *args, **kwargs): """ :param function var_type: The type of the input variable, e.g. str, int, float, etc. Default: str :param choices: An iterable, all of whose elements are of `var_type` to restrict parameter choices to. """ if "choices" not in kwargs: raise ParameterException("A choices iterable must be specified") self._choices = set(kwargs.pop("choices")) self._var_type = var_type assert all(type(choice) is self._var_type for choice in self._choices), "Invalid type in choices" super(ChoiceParameter, self).__init__(*args, **kwargs) if self.description: self.description += " " else: self.description = "" self.description += ( "Choices: {" + ", ".join(str(choice) for choice in self._choices) + "}") def parse(self, s): var = self._var_type(s) if var in self._choices: return var else: raise ValueError("{s} is not a valid choice from {choices}".format( s=s, choices=self._choices)) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Parameters are one of the core concepts of Luigi. All Parameters sit on :class:`~luigi.task.Task` classes. See :ref:`Parameter` for more info on how to define parameters. 
''' import abc import datetime import warnings import json from json import JSONEncoder from collections import OrderedDict, Mapping import operator import functools from ast import literal_eval try: from ConfigParser import NoOptionError, NoSectionError except ImportError: from configparser import NoOptionError, NoSectionError from luigi import task_register from luigi import six from luigi import configuration from luigi.cmdline_parser import CmdlineParser _no_value = object() class ParameterException(Exception): """ Base exception. """ pass class MissingParameterException(ParameterException): """ Exception signifying that there was a missing Parameter. """ pass class UnknownParameterException(ParameterException): """ Exception signifying that an unknown Parameter was supplied. """ pass class DuplicateParameterException(ParameterException): """ Exception signifying that a Parameter was specified multiple times. """ pass class Parameter(object): """ Parameter whose value is a ``str``, and a base class for other parameter types. Parameters are objects set on the Task class level to make it possible to parameterize tasks. For instance: .. code:: python class MyTask(luigi.Task): foo = luigi.Parameter() class RequiringTask(luigi.Task): def requires(self): return MyTask(foo="hello") def run(self): print(self.requires().foo) # prints "hello" This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately. When a task is instantiated, it will first use any argument as the value of the parameter, eg. if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the value will be resolved in this order of falling priority: * Any value provided on the command line: - To the root task (eg. ``--param xyz``) - Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``). * With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion` * Any default value set using the ``default`` flag. Parameter objects may be reused, but you must then set the ``positional=False`` flag. """ _counter = 0 # non-atomically increasing counter used for ordering parameters. def __init__(self, default=_no_value, is_global=False, significant=True, description=None, config_path=None, positional=True, always_in_help=False, batch_method=None): """ :param default: the default value for this parameter. This should match the type of the Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for ``IntParameter``. By default, no default is stored and the value must be specified at runtime. :param bool significant: specify ``False`` if the parameter should not be treated as part of the unique identifier for a Task. An insignificant Parameter might also be used to specify a password or other sensitive information that should not be made public via the scheduler. Default: ``True``. :param str description: A human-readable string describing the purpose of this Parameter. For command-line invocations, this will be used as the `help` string shown to users. Default: ``None``. :param dict config_path: a dictionary with entries ``section`` and ``name`` specifying a config file entry from which to read the default value for this parameter. DEPRECATED. Default: ``None``. :param bool positional: If true, you can set the argument as a positional argument. 
It's true by default but we recommend ``positional=False`` for abstract base classes and similar cases. :param bool always_in_help: For the --help option in the command line parsing. Set true to always show in --help. :param function(iterable[A])->A batch_method: Method to combine an iterable of parsed parameter values into a single value. Used when receiving batched parameter lists from the scheduler. See :ref:`batch_method` """ self._default = default self._batch_method = batch_method if is_global: warnings.warn("is_global support is removed. Assuming positional=False", DeprecationWarning, stacklevel=2) positional = False self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks self.positional = positional self.description = description self.always_in_help = always_in_help if config_path is not None and ('section' not in config_path or 'name' not in config_path): raise ParameterException('config_path must be a hash containing entries for section and name') self._config_path = config_path self._counter = Parameter._counter # We need to keep track of this to get the order right (see Task class) Parameter._counter += 1 def _get_value_from_config(self, section, name): """Loads the default from the config. Returns _no_value if it doesn't exist""" conf = configuration.get_config() try: value = conf.get(section, name) except (NoSectionError, NoOptionError): return _no_value return self.parse(value) def _get_value(self, task_name, param_name): for value, warn in self._value_iterator(task_name, param_name): if value != _no_value: if warn: warnings.warn(warn, DeprecationWarning) return value return _no_value def _value_iterator(self, task_name, param_name): """ Yield the parameter values, with optional deprecation warning as second tuple value. The parameter value will be whatever non-_no_value that is yielded first. """ cp_parser = CmdlineParser.get_instance() if cp_parser: dest = self._parser_global_dest(param_name, task_name) found = getattr(cp_parser.known_args, dest, None) yield (self._parse_or_no_value(found), None) yield (self._get_value_from_config(task_name, param_name), None) yield (self._get_value_from_config(task_name, param_name.replace('_', '-')), 'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format( task_name, param_name)) if self._config_path: yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']), 'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format( self._config_path['section'], self._config_path['name'], task_name, param_name)) yield (self._default, None) def has_task_value(self, task_name, param_name): return self._get_value(task_name, param_name) != _no_value def task_value(self, task_name, param_name): value = self._get_value(task_name, param_name) if value == _no_value: raise MissingParameterException("No default specified") else: return self.normalize(value) def _is_batchable(self): return self._batch_method is not None def parse(self, x): """ Parse an individual value from the input. The default implementation is the identity function, but subclasses should override this method for specialized parsing. :param str x: the value to parse. :return: the parsed value. """ return x # default impl def _parse_list(self, xs): """ Parse a list of values from the scheduler. Only possible if this is_batchable() is True. This will combine the list into a single parameter value using batch method. 
This should never need to be overridden. :param xs: list of values to parse and combine :return: the combined parsed values """ if not self._is_batchable(): raise NotImplementedError('No batch method found') elif not xs: raise ValueError('Empty parameter list passed to parse_list') else: return self._batch_method(map(self.parse, xs)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return str(x) def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != Parameter: return if not isinstance(param_value, six.string_types): warnings.warn('Parameter "{}" with value "{}" is not of type string.'.format(param_name, param_value)) def normalize(self, x): """ Given a parsed parameter value, normalizes it. The value can either be the result of parse(), the default value or arguments passed into the task's constructor by instantiation. This is very implementation defined, but can be used to validate/clamp valid values. For example, if you wanted to only accept even integers, and "correct" odd values to the nearest integer, you can implement normalize as ``x // 2 * 2``. """ return x # default impl def next_in_enumeration(self, _value): """ If your Parameter type has an enumerable ordering of values. You can choose to override this method. This method is used by the :py:mod:`luigi.execution_summary` module for pretty printing purposes. Enabling it to pretty print tasks like ``MyTask(num=1), MyTask(num=2), MyTask(num=3)`` to ``MyTask(num=1..3)``. :param value: The value :return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering. """ return None def _parse_or_no_value(self, x): if not x: return _no_value else: return self.parse(x) @staticmethod def _parser_global_dest(param_name, task_name): return task_name + '_' + param_name @staticmethod def _parser_action(): return "store" _UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0) class _DateParameterBase(Parameter): """ Base class Parameter for date (not datetime). """ def __init__(self, interval=1, start=None, **kwargs): super(_DateParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH.date() @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. """ pass def parse(self, s): """ Parses a date string formatted like ``YYYY-MM-DD``. """ return datetime.datetime.strptime(s, self.date_format).date() def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) class DateParameter(_DateParameterBase): """ Parameter whose value is a :py:class:`~datetime.date`. A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies July 10, 2013. DateParameters are 90% of the time used to be interpolated into file system paths or the like. Here is a gentle reminder of how to interpolate date parameters into strings: .. code:: python class MyTask(luigi.Task): date = luigi.DateParameter() def run(self): templated_path = "/my/path/to/my/dataset/{date:%Y/%m/%d}/" instantiated_path = templated_path.format(date=self.date) # print(instantiated_path) --> /my/path/to/my/dataset/2016/06/09/ # ... use instantiated_path ... To set this parameter to default to the current day. You can write code like this: .. 
code:: python import datetime class MyTask(luigi.Task): date = luigi.DateParameter(default=datetime.date.today()) """ date_format = '%Y-%m-%d' def next_in_enumeration(self, value): return value + datetime.timedelta(days=self.interval) def normalize(self, value): if value is None: return None if isinstance(value, datetime.datetime): value = value.date() delta = (value - self.start).days % self.interval return value - datetime.timedelta(days=delta) class MonthParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the month (day of :py:class:`~datetime.date` is "rounded" to first of the month). A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies July of 2013. """ date_format = '%Y-%m' def _add_months(self, date, months): """ Add ``months`` months to ``date``. Unfortunately we can't use timedeltas to add months because timedelta counts in days and there's no foolproof way to add N months in days without counting the number of days per month. """ year = date.year + (date.month + months - 1) // 12 month = (date.month + months - 1) % 12 + 1 return datetime.date(year=year, month=month, day=1) def next_in_enumeration(self, value): return self._add_months(value, self.interval) def normalize(self, value): if value is None: return None months_since_start = (value.year - self.start.year) * 12 + (value.month - self.start.month) months_since_start -= months_since_start % self.interval return self._add_months(self.start, months_since_start) class YearParameter(DateParameter): """ Parameter whose value is a :py:class:`~datetime.date`, specified to the year (day and month of :py:class:`~datetime.date` is "rounded" to first day of the year). A YearParameter is a Date string formatted ``YYYY``. """ date_format = '%Y' def next_in_enumeration(self, value): return value.replace(year=value.year + self.interval) def normalize(self, value): if value is None: return None delta = (value.year - self.start.year) % self.interval return datetime.date(year=value.year - delta, month=1, day=1) class _DatetimeParameterBase(Parameter): """ Base class Parameter for datetime """ def __init__(self, interval=1, start=None, **kwargs): super(_DatetimeParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH @abc.abstractproperty def date_format(self): """ Override me with a :py:meth:`~datetime.date.strftime` string. """ pass @abc.abstractproperty def _timedelta(self): """ How to move one interval of this type forward (i.e. not counting self.interval). """ pass def parse(self, s): """ Parses a string to a :py:class:`~datetime.datetime`. """ return datetime.datetime.strptime(s, self.date_format) def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format) def normalize(self, dt): """ Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at :py:attr:`~_DatetimeParameterBase.start`. """ if dt is None: return None dt = dt.replace(microsecond=0) # remove microseconds, to avoid float rounding issues. 
delta = (dt - self.start).total_seconds() granularity = (self._timedelta * self.interval).total_seconds() return dt - datetime.timedelta(seconds=delta % granularity) def next_in_enumeration(self, value): return value + self._timedelta * self.interval class DateHourParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour. A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at 19:00. """ date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T' _timedelta = datetime.timedelta(hours=1) class DateMinuteParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute. A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at 19:07. The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute. """ date_format = '%Y-%m-%dT%H%M' _timedelta = datetime.timedelta(minutes=1) deprecated_date_format = '%Y-%m-%dT%HH%M' def parse(self, s): try: value = datetime.datetime.strptime(s, self.deprecated_date_format) warnings.warn( 'Using "H" between hours and minutes is deprecated, omit it instead.', DeprecationWarning, stacklevel=2 ) return value except ValueError: return super(DateMinuteParameter, self).parse(s) class DateSecondParameter(_DatetimeParameterBase): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the second. A DateSecondParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the second. For example, ``2013-07-10T190738`` specifies July 10, 2013 at 19:07:38. The interval parameter can be used to clamp this parameter to every N seconds, instead of every second. """ date_format = '%Y-%m-%dT%H%M%S' _timedelta = datetime.timedelta(seconds=1) class IntParameter(Parameter): """ Parameter whose value is an ``int``. """ def parse(self, s): """ Parses an ``int`` from the string using ``int()``. """ return int(s) def next_in_enumeration(self, value): return value + 1 class FloatParameter(Parameter): """ Parameter whose value is a ``float``. """ def parse(self, s): """ Parses a ``float`` from the string using ``float()``. """ return float(s) class BoolParameter(Parameter): """ A Parameter whose value is a ``bool``. This parameter have an implicit default value of ``False``. """ def __init__(self, *args, **kwargs): super(BoolParameter, self).__init__(*args, **kwargs) if self._default == _no_value: self._default = False def parse(self, s): """ Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. """ return {'true': True, 'false': False}[str(s).lower()] def normalize(self, value): # coerce anything truthy to True return bool(value) if value is not None else None @staticmethod def _parser_action(): return 'store_true' class DateIntervalParameter(Parameter): """ A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`. Date Intervals are specified using the ISO 8601 date notation for dates (eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks (eg. "2015-W35"). In addition, it also supports arbitrary date intervals provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04"). 
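# ---------------------------------------------------------------------------
# Illustrative sketch (not luigi code): the cascade DateIntervalParameter.parse
# runs below -- try each interval class in turn and return the first match.
# parse_first and the lambda parsers are invented for this example.
import re

def parse_first(s, parsers):
    for parse in parsers:
        result = parse(s)
        if result:
            return result
    raise ValueError('Invalid date interval - could not be parsed')

# parse_first('2015-W35', [
#     lambda s: re.match(r'^\d{4}$', s) and ('year', s),
#     lambda s: re.match(r'^\d{4}-W\d{2}$', s) and ('week', s),
# ])  ->  ('week', '2015-W35')
# ---------------------------------------------------------------------------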
""" def parse(self, s): """ Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i raise ValueError('Invalid date interval - could not be parsed') class TimeDeltaParameter(Parameter): """ Class that maps to timedelta using strings in any of the following forms: * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h") Note: multiple arguments must be supplied in longest to shortest unit order * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported) * ISO 8601 duration ``PnW`` See https://en.wikipedia.org/wiki/ISO_8601#Durations """ def _apply_regex(self, regex, input): import re re_match = re.match(regex, input) if re_match and any(re_match.groups()): kwargs = {} has_val = False for k, v in six.iteritems(re_match.groupdict(default="0")): val = int(v) if val > -1: has_val = True kwargs[k] = val if has_val: return datetime.timedelta(**kwargs) def _parseIso8601(self, input): def field(key): return r"(?P<%s>\d+)%s" % (key, key[0].upper()) def optional_field(key): return "(%s)?" % field(key) # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta) regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]])) return self._apply_regex(regex, input) def _parseSimple(self, input): keys = ["weeks", "days", "hours", "minutes", "seconds"] # Give the digits a regex group name from the keys, then look for text with the first letter of the key, # optionally followed by the rest of the word, with final char (the "s") optional regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys]) return self._apply_regex(regex, input) def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. """ result = self._parseIso8601(input) if not result: result = self._parseSimple(input) if result is not None: return result else: raise ParameterException("Invalid time delta - could not parse %s" % input) def serialize(self, x): """ Converts datetime.timedelta to a string :param x: the value to serialize. """ weeks = x.days // 7 days = x.days % 7 hours = x.seconds // 3600 minutes = (x.seconds % 3600) // 60 seconds = (x.seconds % 3600) % 60 result = "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds) return result def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != TimeDeltaParameter: return if not isinstance(param_value, datetime.timedelta): warnings.warn('Parameter "{}" with value "{}" is not of type timedelta.'.format(param_name, param_value)) class TaskParameter(Parameter): """ A parameter that takes another luigi task class. When used programatically, the parameter should be specified directly with the :py:class:`luigi.task.Task` (sub) class. Like ``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line, you specify the :py:meth:`luigi.task.Task.get_task_family`. Like .. code-block:: console $ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module. 
    When the :py:class:`luigi.task.Task` class is instantiated to an object,
    the value will always be a task class (and not a string).
    """

    def parse(self, input):
        """
        Parse a task_family using the :class:`~luigi.task_register.Register`
        """
        return task_register.Register.get_task_cls(input)

    def serialize(self, cls):
        """
        Converts the :py:class:`luigi.task.Task` (sub) class to its family name.
        """
        return cls.get_task_family()


class EnumParameter(Parameter):
    """
    A parameter whose value is an :class:`~enum.Enum`.

    In the task definition, use

    .. code-block:: python

        class Model(enum.Enum):
            Honda = 1
            Volvo = 2

        class MyTask(luigi.Task):
            my_param = luigi.EnumParameter(enum=Model)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --my-param Honda
    """

    def __init__(self, *args, **kwargs):
        if 'enum' not in kwargs:
            raise ParameterException('An enum class must be specified.')
        self._enum = kwargs.pop('enum')
        super(EnumParameter, self).__init__(*args, **kwargs)

    def parse(self, s):
        try:
            return self._enum[s]
        except KeyError:
            raise ValueError('Invalid enum value - could not be parsed')

    def serialize(self, e):
        return e.name


class _FrozenOrderedDict(Mapping):
    """
    An immutable wrapper around ordered dictionaries that implements the complete
    :py:class:`collections.Mapping` interface. It can be used as a drop-in replacement
    for dictionaries where immutability and ordering are desired.
    """

    def __init__(self, *args, **kwargs):
        self.__dict = OrderedDict(*args, **kwargs)
        self.__hash = None

    def __getitem__(self, key):
        return self.__dict[key]

    def __iter__(self):
        return iter(self.__dict)

    def __len__(self):
        return len(self.__dict)

    def __repr__(self):
        return '<FrozenOrderedDict %s>' % repr(self.__dict)

    def __hash__(self):
        if self.__hash is None:
            hashes = map(hash, self.items())
            self.__hash = functools.reduce(operator.xor, hashes, 0)
        return self.__hash

    def get_wrapped(self):
        return self.__dict


def _recursively_freeze(value):
    """
    Recursively walks ``Mapping``s and ``list``s and converts them to ``_FrozenOrderedDict`` and ``tuples``, respectively.
    """
    if isinstance(value, Mapping):
        return _FrozenOrderedDict(((k, _recursively_freeze(v)) for k, v in value.items()))
    elif isinstance(value, list) or isinstance(value, tuple):
        return tuple(_recursively_freeze(v) for v in value)
    return value


class _DictParamEncoder(JSONEncoder):
    """
    JSON encoder for :py:class:`~DictParameter`, which makes :py:class:`~_FrozenOrderedDict` JSON serializable.
    """

    def default(self, obj):
        if isinstance(obj, _FrozenOrderedDict):
            return obj.get_wrapped()
        return json.JSONEncoder.default(self, obj)


class DictParameter(Parameter):
    """
    Parameter whose value is a ``dict``.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
            tags = luigi.DictParameter()

            def run(self):
                logging.info("Find server with role: %s", self.tags['role'])
                server = aws.ec2.find_my_resource(self.tags)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --tags <JSON string>

    Simple example with two tags:

    .. code-block:: console

        $ luigi --module my_tasks MyTask --tags '{"role": "web", "env": "staging"}'

    It can be used to define dynamic parameters, when you do not know the exact list of your parameters
    (e.g. list of tags, that are dynamically constructed outside Luigi), or you have a complex parameter
    containing logically related values (like a database connection config).
    """

    def normalize(self, value):
        """
        Ensure that the dictionary parameter is converted to a _FrozenOrderedDict so it can be hashed.
""" return _recursively_freeze(value) def parse(self, s): """ Parses an immutable and ordered ``dict`` from a JSON string using standard JSON library. We need to use an immutable dictionary, to create a hashable parameter and also preserve the internal structure of parsing. The traversal order of standard ``dict`` is undefined, which can result various string representations of this parameter, and therefore a different task id for the task containing this parameter. This is because task id contains the hash of parameters' JSON representation. :param s: String to be parse """ return json.loads(s, object_pairs_hook=_FrozenOrderedDict) def serialize(self, x): return json.dumps(x, cls=_DictParamEncoder) class ListParameter(Parameter): """ Parameter whose value is a ``list``. In the task definition, use .. code-block:: python class MyTask(luigi.Task): grades = luigi.ListParameter() def run(self): sum = 0 for element in self.grades: sum += element avg = sum / len(self.grades) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --grades <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --grades '[100,70]' """ def normalize(self, x): """ Ensure that struct is recursively converted to a tuple so it can be hashed. :param str x: the value to parse. :return: the normalized (hashable/immutable) value. """ return _recursively_freeze(x) def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """ return list(json.loads(x, object_pairs_hook=_FrozenOrderedDict)) def serialize(self, x): """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ return json.dumps(x, cls=_DictParamEncoder) class TupleParameter(ListParameter): """ Parameter whose value is a ``tuple`` or ``tuple`` of tuples. In the task definition, use .. code-block:: python class MyTask(luigi.Task): book_locations = luigi.TupleParameter() def run(self): for location in self.book_locations: print("Go to page %d, line %d" % (location[0], location[1])) At the command line, use .. code-block:: console $ luigi --module my_tasks MyTask --book_locations <JSON string> Simple example with two grades: .. code-block:: console $ luigi --module my_tasks MyTask --book_locations '((12,3),(4,15),(52,1))' """ def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """ # Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case. # A tuple string may come from a config file or from cli execution. # t = ((1, 2), (3, 4)) # t_str = '((1,2),(3,4))' # t_json_str = json.dumps(t) # t_json_str == '[[1, 2], [3, 4]]' # json.loads(t_json_str) == t # json.loads(t_str) == ValueError: No JSON object could be decoded # Therefore, if json.loads(x) returns a ValueError, try ast.literal_eval(x). # ast.literal_eval(t_str) == t try: # loop required to parse tuple of tuples return tuple(tuple(x) for x in json.loads(x, object_pairs_hook=_FrozenOrderedDict)) except ValueError: return literal_eval(x) # if this causes an error, let that error be raised. class NumericalParameter(Parameter): """ Parameter whose value is a number of the specified type, e.g. ``int`` or ``float`` and in the range specified. In the task definition, use .. 
    .. code-block:: python

        class MyTask(luigi.Task):
            my_param_1 = luigi.NumericalParameter(
                var_type=int, min_value=-3, max_value=7)  # -3 <= my_param_1 < 7
            my_param_2 = luigi.NumericalParameter(
                var_type=int, min_value=-3, max_value=7, left_op=operator.lt, right_op=operator.le)  # -3 < my_param_2 <= 7

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --my-param-1 -3 --my-param-2 -2
    """

    def __init__(self, left_op=operator.le, right_op=operator.lt, *args, **kwargs):
        """
        :param function var_type: The type of the input variable, e.g. int or float.
        :param min_value: The minimum value permissible in the accepted values range.
                          May be inclusive or exclusive based on left_op parameter.
                          This should be the same type as var_type.
        :param max_value: The maximum value permissible in the accepted values range.
                          May be inclusive or exclusive based on right_op parameter.
                          This should be the same type as var_type.
        :param function left_op: The comparison operator for the left-most comparison in
                                 the expression ``min_value left_op value right_op max_value``.
                                 This operator should generally be either ``operator.lt`` or ``operator.le``.
                                 Default: ``operator.le``.
        :param function right_op: The comparison operator for the right-most comparison in
                                  the expression ``min_value left_op value right_op max_value``.
                                  This operator should generally be either ``operator.lt`` or ``operator.le``.
                                  Default: ``operator.lt``.
        """
        if "var_type" not in kwargs:
            raise ParameterException("var_type must be specified")
        self._var_type = kwargs.pop("var_type")
        if "min_value" not in kwargs:
            raise ParameterException("min_value must be specified")
        self._min_value = kwargs.pop("min_value")
        if "max_value" not in kwargs:
            raise ParameterException("max_value must be specified")
        self._max_value = kwargs.pop("max_value")
        self._left_op = left_op
        self._right_op = right_op
        self._permitted_range = (
            "{var_type} in {left_endpoint}{min_value}, {max_value}{right_endpoint}".format(
                var_type=self._var_type.__name__,
                min_value=self._min_value, max_value=self._max_value,
                left_endpoint="[" if left_op == operator.le else "(",
                right_endpoint=")" if right_op == operator.lt else "]"))
        super(NumericalParameter, self).__init__(*args, **kwargs)
        if self.description:
            self.description += " "
        else:
            self.description = ""
        self.description += "permitted values: " + self._permitted_range

    def parse(self, s):
        value = self._var_type(s)
        if self._left_op(self._min_value, value) and self._right_op(value, self._max_value):
            return value
        else:
            raise ValueError(
                "{s} is not in the set of {permitted_range}".format(
                    s=s, permitted_range=self._permitted_range))


class ChoiceParameter(Parameter):
    """
    A parameter which takes two values:
        1. an instance of :class:`~collections.Iterable` and
        2. the class of the variables to convert to.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
            my_param = luigi.ChoiceParameter(choices=[0.1, 0.2, 0.3], var_type=float)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --my-param 0.1

    Consider using :class:`~luigi.EnumParameter` for a typed, structured alternative.
    This class can perform the same role when all choices are the same type and
    transparency of parameter value on the command line is desired.
    """

    def __init__(self, var_type=str, *args, **kwargs):
        """
        :param function var_type: The type of the input variable, e.g. str, int, float, etc.
                                  Default: str
        :param choices: An iterable, all of whose elements are of `var_type`, to
                        restrict parameter choices to.
""" if "choices" not in kwargs: raise ParameterException("A choices iterable must be specified") self._choices = set(kwargs.pop("choices")) self._var_type = var_type assert all(type(choice) is self._var_type for choice in self._choices), "Invalid type in choices" super(ChoiceParameter, self).__init__(*args, **kwargs) if self.description: self.description += " " else: self.description = "" self.description += ( "Choices: {" + ", ".join(str(choice) for choice in self._choices) + "}") def parse(self, s): var = self._var_type(s) if var in self._choices: return var else: raise ValueError("{s} is not a valid choice from {choices}".format( s=s, choices=self._choices))
BugsInPy/BugsInPy/temp/projects/luigi/bug-6-fixed/luigi/luigi/parameter.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-6-buggy/luigi/luigi/parameter.py
luigi-bug-16
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. 
Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker or Task class, this # code needs to be updated # Compatibility since 2014-06-02 for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) # Compatibility since 2015-05-28 if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. 
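# Rebuild each worker's task set from the inverse task.workers mapping, which
# the old pickle format did persist.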
for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) # Compatibility since 2015-04-28 if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)): for t in six.itervalues(self._tasks): t.disable_hard_timeout = None else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). """ return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def prune(self, task, config): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time 
expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
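# Note on the factory below: _make_task is a partial application of Task with
# the scheduler's disable_* settings pre-bound, so every Task object this
# scheduler creates shares the same failure-disable policy without re-reading
# the config each time.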
        self._make_task = functools.partial(
            Task, disable_failures=self._config.disable_failures,
            disable_hard_timeout=self._config.disable_hard_timeout,
            disable_window=self._config.disable_window)
        self._worker_requests = {}

    def load(self):
        self._state.load()

    def dump(self):
        self._state.dump()

    def prune(self):
        logger.info("Starting pruning of task graph")
        remove_workers = []
        for worker in self._state.get_active_workers():
            if worker.prune(self._config):
                logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
                remove_workers.append(worker.id)

        self._state.inactivate_workers(remove_workers)

        assistant_ids = set(w.id for w in self._state.get_assistants())
        remove_tasks = []

        if assistant_ids:
            necessary_tasks = self._state.get_necessary_tasks()
        else:
            necessary_tasks = ()

        for task in self._state.get_active_tasks():
            self._state.fail_dead_worker_task(task, self._config, assistant_ids)
            if task.id not in necessary_tasks and self._state.prune(task, self._config):
                remove_tasks.append(task.id)

        self._state.inactivate_tasks(remove_tasks)

        logger.info("Done pruning task graph")

    def update(self, worker_id, worker_reference=None, get_work=False):
        """
        Keep track of whenever the worker was last active.
        """
        worker = self._state.get_worker(worker_id)
        worker.update(worker_reference, get_work=get_work)

    def _update_priority(self, task, prio, worker):
        """
        Update priority of the given task.

        Priority can only be increased.

        If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
        """
        task.priority = prio = max(prio, task.priority)
        for dep in task.deps or []:
            t = self._state.get_task(dep)
            if t is not None and prio > t.priority:
                self._update_priority(t, prio, worker)

    def add_task(self, task_id=None, status=PENDING, runnable=True,
                 deps=None, new_deps=None, expl=None, resources=None,
                 priority=0, family='', module=None, params=None,
                 assistant=False, tracking_url=None, **kwargs):
        """
        * add task identified by task_id if it doesn't exist
        * if deps is not None, update dependency list
        * update status of task
        * add additional workers/stakeholders
        * update priority when needed
        """
        worker_id = kwargs['worker']
        self.update(worker_id)

        task = self._state.get_task(task_id, setdefault=self._make_task(
            task_id=task_id, status=PENDING, deps=deps, resources=resources,
            priority=priority, family=family, module=module, params=params))

        # for setting priority, we'll sometimes create tasks with unset family and params
        if not task.family:
            task.family = family
        if not getattr(task, 'module', None):
            task.module = module
        if not task.params:
            task.params = _get_default(params, {})

        if tracking_url is not None or task.status != RUNNING:
            task.tracking_url = tracking_url

        if task.remove is not None:
            task.remove = None  # unmark task for removal so it isn't removed after being added

        if expl is not None:
            task.expl = expl

        if not (task.status == RUNNING and status == PENDING) or new_deps:
            # don't allow re-scheduling of task while it is running, it must either fail or succeed first
            if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
            if status == FAILED:
                task.retry = self._retry_time(task, self._config)

        if deps is not None:
            task.deps = set(deps)

        if new_deps is not None:
            task.deps.update(new_deps)

        if resources is not None:
            task.resources = resources

        if not assistant:
            task.stakeholders.add(worker_id)

            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)

        self._update_priority(task, priority, worker_id)

        if runnable:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable

    def add_worker(self, worker, info, **kwargs):
        self._state.get_worker(worker).add_info(info)

    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)

    def _has_resources(self, needed_resources, used_resources):
        if needed_resources is None:
            return True

        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] > available_resources.get(resource, 1):
                return False
        return True

    def _used_resources(self):
        used_resources = collections.defaultdict(int)
        if self._resources is not None:
            for task in self._state.get_active_tasks():
                if task.status == RUNNING and task.resources:
                    for resource, amount in six.iteritems(task.resources):
                        used_resources[resource] += amount
        return used_resources

    def _rank(self, task):
        """
        Return the sort key used for task scheduling: higher priority first,
        then older tasks first.
        """
        return task.priority, -task.time

    def _schedulable(self, task):
        if task.status != PENDING:
            return False
        for dep in task.deps:
            dep_task = self._state.get_task(dep, default=None)
            if dep_task is None or dep_task.status != DONE:
                return False
        return True

    def _retry_time(self, task, config):
        return time.time() + config.retry_delay

    def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
        # TODO: remove any expired nodes

        # Algo: iterate over all nodes, find the highest priority node with no
        # dependencies and available resources.

        # Resource checking looks both at currently available resources and at which resources would
        # be available if all running tasks died and we rescheduled all workers greedily. We do both
        # checks in order to prevent a worker with many low-priority tasks from starving other
        # workers with higher priority tasks that share the same resources.
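        # Illustrative walk-through of the greedy check (hypothetical numbers):
        # suppose the total resources are {'db': 1} and the highest-priority
        # PENDING task needing db=1 belongs to another worker. The greedy pass
        # below "reserves" that unit in greedy_resources, so when a lower-priority
        # db task owned by the asking worker is reached,
        # _has_resources(task.resources, greedy_resources) fails and the task
        # is not handed out from under the higher-priority one.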
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, 
upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _traverse_graph(self, root_task_id, seen=None, dep_func=None): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: dep_func = lambda t: t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
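# Serialize an UNKNOWN placeholder node instead, so the dependency edge can still appear in graph output.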
family, params = UNKNOWN, {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: deps = dep_func(task) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if len(serialized) >= self._config.max_graph_nodes: break return serialized def dep_graph(self, task_id, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id) def inverse_dep_graph(self, task_id, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph(task_id, dep_func=lambda t: inverse_graph[t.id]) def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. 
We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. 
""" def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker or Task class, this # code needs to be updated # Compatibility since 2014-06-02 for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) # Compatibility since 2015-05-28 if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) # Compatibility since 2015-04-28 if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)): for t in six.itervalues(self._tasks): t.disable_hard_timeout = None else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def prune(self, task, config): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. 
That's why we call it "inactivate" (as in the verb)
        for task in delete_tasks:
            task_obj = self._tasks.pop(task)
            self._status_tasks[task_obj.status].pop(task)

    def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
        for worker in six.itervalues(self._active_workers):
            if last_active_lt is not None and worker.last_active >= last_active_lt:
                continue
            last_get_work = getattr(worker, 'last_get_work', None)
            if last_get_work_gt is not None and (
                    last_get_work is None or last_get_work <= last_get_work_gt):
                continue
            yield worker

    def get_assistants(self, last_active_lt=None):
        return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))

    def get_worker_ids(self):
        return self._active_workers.keys()  # only used for unit tests

    def get_worker(self, worker_id):
        return self._active_workers.setdefault(worker_id, Worker(worker_id))

    def inactivate_workers(self, delete_workers):
        # Mark workers as inactive
        for worker in delete_workers:
            self._active_workers.pop(worker)

        # remove workers from tasks
        for task in self.get_active_tasks():
            task.stakeholders.difference_update(delete_workers)
            task.workers.difference_update(delete_workers)

    def get_necessary_tasks(self):
        necessary_tasks = set()
        for task in self.get_active_tasks():
            if task.status not in (DONE, DISABLED) or \
                    getattr(task, 'scheduler_disable_time', None) is not None:
                necessary_tasks.update(task.deps)
                necessary_tasks.add(task.id)
        return necessary_tasks


class CentralPlannerScheduler(Scheduler):
    """
    Async scheduler that can handle multiple workers, etc.

    Can be run locally or on a server (using RemoteScheduler + server.Server).
    """

    def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
        """
        Keyword Arguments:

        :param config: an object of class "scheduler" or None (in which case the
            global instance will be used)
        :param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
        """
        self._config = config or scheduler(**kwargs)
        self._state = SimpleTaskState(self._config.state_path)

        if task_history_impl:
            self._task_history = task_history_impl
        elif self._config.record_task_history:
            from luigi import db_task_history  # Needs sqlalchemy, thus imported here
            self._task_history = db_task_history.DbTaskHistory()
        else:
            self._task_history = history.NopHistory()
        self._resources = resources or configuration.get_config().getintdict('resources')  # TODO: Can we make this a Parameter?
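        # A hedged illustration (assumed, not from this file): with an ini-style
        # luigi config, a section such as
        #
        #   [resources]
        #   db_connections = 4
        #   hive = 1
        #
        # would be read by getintdict('resources') as
        # {'db_connections': 4, 'hive': 1}; an explicit `resources` argument to
        # __init__ takes precedence over the config-derived dict.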
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) removed = self._state.prune(task, self._config) if removed and task.id not in necessary_tasks: remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
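        # Worked example of the two checks (illustrative numbers, not from the
        # original source): say the global pool is {'db': 2}, one RUNNING task
        # already holds db=1 (so used_resources['db'] == 1), and a higher-priority
        # PENDING db=1 task belongs to another active worker. The greedy pass
        # counts the RUNNING task and reserves the rival PENDING one, pushing
        # greedy_resources['db'] to 2; this worker's own PENDING db=1 task then
        # fails the greedy check (1 + 2 > 2) and is withheld, even though the
        # plain used_resources check (1 + 1 <= 2) would have let it through.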
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, 
upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _traverse_graph(self, root_task_id, seen=None, dep_func=None): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: dep_func = lambda t: t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
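                # A placeholder node is emitted instead: UNKNOWN status and name,
                # empty deps and workers, priority 0 (see the dict literal below).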
family, params = UNKNOWN, {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: deps = dep_func(task) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if len(serialized) >= self._config.max_graph_nodes: break return serialized def dep_graph(self, task_id, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id) def inverse_dep_graph(self, task_id, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph(task_id, dep_func=lambda t: inverse_graph[t.id]) def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
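# A minimal usage sketch of the scheduler above, written as a standalone script
# (not part of scheduler.py; the worker id and the zero retry delay are made-up
# values, and the statuses are the plain strings from luigi.task_status):

from luigi.scheduler import CentralPlannerScheduler
from luigi.task_status import DONE, PENDING

sched = CentralPlannerScheduler(retry_delay=0.0)

# 'B' depends on 'A'; both are registered by worker 'w1'.
sched.add_task(worker='w1', task_id='A', status=PENDING)
sched.add_task(worker='w1', task_id='B', status=PENDING, deps=['A'])

work = sched.get_work(worker='w1')
assert work['task_id'] == 'A'          # only 'A' is schedulable; 'B' waits on it
sched.add_task(worker='w1', task_id='A', status=DONE)
assert sched.get_work(worker='w1')['task_id'] == 'B'   # dependency satisfied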
BugsInPy/BugsInPy/temp/projects/luigi/bug-16-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-16-buggy/luigi/luigi/scheduler.py
luigi-bug-14
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import functools import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) class Failures(object): """ This class tracks the number of failures in a given time window. 
Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None, status_message=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.status_message = status_message self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items()) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from empty state.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with empty state", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> might remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() > task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. 
That's why we call it "inactivate" (as in the verb)
        for task in delete_tasks:
            task_obj = self._tasks.pop(task)
            self._status_tasks[task_obj.status].pop(task)

    def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
        for worker in six.itervalues(self._active_workers):
            if last_active_lt is not None and worker.last_active >= last_active_lt:
                continue
            last_get_work = getattr(worker, 'last_get_work', None)
            if last_get_work_gt is not None and (
                    last_get_work is None or last_get_work <= last_get_work_gt):
                continue
            yield worker

    def get_assistants(self, last_active_lt=None):
        return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))

    def get_worker_ids(self):
        return self._active_workers.keys()  # only used for unit tests

    def get_worker(self, worker_id):
        return self._active_workers.setdefault(worker_id, Worker(worker_id))

    def inactivate_workers(self, delete_workers):
        # Mark workers as inactive
        for worker in delete_workers:
            self._active_workers.pop(worker)
        self._remove_workers_from_tasks(delete_workers)

    def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
        for task in self.get_active_tasks():
            if remove_stakeholders:
                task.stakeholders.difference_update(workers)
            task.workers.difference_update(workers)

    def disable_workers(self, workers):
        self._remove_workers_from_tasks(workers, remove_stakeholders=False)
        for worker in workers:
            self.get_worker(worker).disabled = True

    def get_necessary_tasks(self):
        necessary_tasks = set()
        for task in self.get_active_tasks():
            if task.status not in (DONE, DISABLED, UNKNOWN) or \
                    task.scheduler_disable_time is not None:
                necessary_tasks.update(task.deps)
                necessary_tasks.add(task.id)
        return necessary_tasks


class CentralPlannerScheduler(Scheduler):
    """
    Async scheduler that can handle multiple workers, etc.

    Can be run locally or on a server (using RemoteScheduler + server.Server).
    """

    def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
        """
        Keyword Arguments:

        :param config: an object of class "scheduler" or None (in which case the
            global instance will be used)
        :param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
        """
        self._config = config or scheduler(**kwargs)
        self._state = SimpleTaskState(self._config.state_path)

        if task_history_impl:
            self._task_history = task_history_impl
        elif self._config.record_task_history:
            from luigi import db_task_history  # Needs sqlalchemy, thus imported here
            self._task_history = db_task_history.DbTaskHistory()
        else:
            self._task_history = history.NopHistory()
        self._resources = resources or configuration.get_config().getintdict('resources')  # TODO: Can we make this a Parameter?
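        # Note on the functools.partial below: it pre-binds the scheduler's
        # disable_* policy, so a call like
        #     self._make_task(task_id='X', status=PENDING, deps=None)
        # is equivalent to constructing Task('X', PENDING, None) with this
        # config's disable_failures / disable_hard_timeout / disable_window.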
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task) and task.id not in necessary_tasks: logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return not getattr(worker, 'disabled', False) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] worker_enabled = self.update(worker_id) if worker_enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker_enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if worker_enabled and not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable and status != FAILED and worker_enabled: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def disable_worker(self, worker): self._state.disable_workers({worker}) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(status=RUNNING): if task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
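        # Ordering note: _rank returns (priority, -task.time) and the task list
        # is sorted with reverse=True, so the highest-priority task comes first
        # and, within a priority, the earliest-added task (largest -time) is
        # tried first.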
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: upstream_status = self._upstream_status(task.id, upstream_table) if upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, 
upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements status = max((upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps), key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': task.status_message } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
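                # Best-effort recovery from the id string itself: TASK_FAMILY_RE,
                # r'([^(_]+)[(_]', captures everything before the first '(' or
                # '_', so 'MyTask(param=1)' and 'MyTask_a1b2c3' both yield
                # 'MyTask'; ids with neither character fall back to UNKNOWN.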
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def resource_list(self): """ Resources usage info and their consumers (tasks). 
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. 
See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import functools import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. 
""" self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None, status_message=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.status_message = status_message self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if self.failures.first_failure_time is not None: if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items()) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. 
""" if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from empty state.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with empty state", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> might remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() > task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. 
        # That's why we call it "inactivate" (as in the verb).
        for task in delete_tasks:
            task_obj = self._tasks.pop(task)
            self._status_tasks[task_obj.status].pop(task)

    def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
        for worker in six.itervalues(self._active_workers):
            if last_active_lt is not None and worker.last_active >= last_active_lt:
                continue
            last_get_work = getattr(worker, 'last_get_work', None)
            if last_get_work_gt is not None and (
                    last_get_work is None or last_get_work <= last_get_work_gt):
                continue
            yield worker

    def get_assistants(self, last_active_lt=None):
        return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))

    def get_worker_ids(self):
        return self._active_workers.keys()  # only used for unit tests

    def get_worker(self, worker_id):
        return self._active_workers.setdefault(worker_id, Worker(worker_id))

    def inactivate_workers(self, delete_workers):
        # Mark workers as inactive
        for worker in delete_workers:
            self._active_workers.pop(worker)
        self._remove_workers_from_tasks(delete_workers)

    def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
        for task in self.get_active_tasks():
            if remove_stakeholders:
                task.stakeholders.difference_update(workers)
            task.workers.difference_update(workers)

    def disable_workers(self, workers):
        self._remove_workers_from_tasks(workers, remove_stakeholders=False)
        for worker in workers:
            self.get_worker(worker).disabled = True

    def get_necessary_tasks(self):
        necessary_tasks = set()
        for task in self.get_active_tasks():
            if task.status not in (DONE, DISABLED, UNKNOWN) or \
                    task.scheduler_disable_time is not None:
                necessary_tasks.update(task.deps)
                necessary_tasks.add(task.id)
        return necessary_tasks


class CentralPlannerScheduler(Scheduler):
    """
    Async scheduler that can handle multiple workers, etc.

    Can be run locally or on a server (using RemoteScheduler + server.Server).
    """

    def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
        """
        Keyword Arguments:

        :param config: an object of class "scheduler" or None (in which case the global instance will be used)
        :param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
        """
        self._config = config or scheduler(**kwargs)
        self._state = SimpleTaskState(self._config.state_path)

        if task_history_impl:
            self._task_history = task_history_impl
        elif self._config.record_task_history:
            from luigi import db_task_history  # Needs sqlalchemy, thus imported here
            self._task_history = db_task_history.DbTaskHistory()
        else:
            self._task_history = history.NopHistory()
        self._resources = resources or configuration.get_config().getintdict('resources')  # TODO: Can we make this a Parameter?
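        # The functools.partial below bakes the scheduler's disable-policy
        # settings into every Task object this scheduler creates, so call
        # sites only supply the per-task fields (id, status, deps, ...).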
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task) and task.id not in necessary_tasks: logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return not getattr(worker, 'disabled', False) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] worker_enabled = self.update(worker_id) if worker_enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker_enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
            # We also check for status == PENDING b/c that's the default value
            # (so checking for status != task.status would lie)
            self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
            if status == FAILED:
                task.retry = self._retry_time(task, self._config)

        if deps is not None:
            task.deps = set(deps)

        if new_deps is not None:
            task.deps.update(new_deps)

        if resources is not None:
            task.resources = resources

        if worker_enabled and not assistant:
            task.stakeholders.add(worker_id)

            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)

        self._update_priority(task, priority, worker_id)

        if runnable and status != FAILED and worker_enabled:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable

    def add_worker(self, worker, info, **kwargs):
        self._state.get_worker(worker).add_info(info)

    def disable_worker(self, worker):
        self._state.disable_workers({worker})

    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)

    def _has_resources(self, needed_resources, used_resources):
        if needed_resources is None:
            return True

        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] > available_resources.get(resource, 1):
                return False
        return True

    def _used_resources(self):
        used_resources = collections.defaultdict(int)
        if self._resources is not None:
            for task in self._state.get_active_tasks(status=RUNNING):
                if task.resources:
                    for resource, amount in six.iteritems(task.resources):
                        used_resources[resource] += amount
        return used_resources

    def _rank(self, task):
        """
        Return worker's rank function for task scheduling.

        :return: a two-tuple (priority, -time) used as the sort key.
        """
        return task.priority, -task.time

    def _schedulable(self, task):
        if task.status != PENDING:
            return False
        for dep in task.deps:
            dep_task = self._state.get_task(dep, default=None)
            if dep_task is None or dep_task.status != DONE:
                return False
        return True

    def _retry_time(self, task, config):
        return time.time() + config.retry_delay

    def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
        # TODO: remove any expired nodes

        # Algo: iterate over all nodes, find the highest priority node with no
        # dependencies and available resources.

        # Resource checking looks both at currently available resources and at which resources would
        # be available if all running tasks died and we rescheduled all workers greedily. We do both
        # checks in order to prevent a worker with many low-priority tasks from starving other
        # workers with higher priority tasks that share the same resources.
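        # Illustration (hypothetical numbers): with resources = {'gpu': 1} and a
        # high-priority gpu task that cannot run right now, the loop below still
        # "reserves" the gpu in greedy_resources, so lower-priority gpu tasks
        # later in the sorted list fail the greedy _has_resources check and are
        # not handed out in the meantime.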
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: upstream_status = self._upstream_status(task.id, upstream_table) if upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, 
upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements status = max((upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps), key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': task.status_message } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
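                # TASK_FAMILY_RE keeps everything before the first '(' or '_',
                # so a hypothetical id like "MyTask(param=1)" degrades to the
                # family "MyTask" for display purposes.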
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def resource_list(self): """ Resources usage info and their consumers (tasks). 
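        Example of a single returned entry (illustrative values):

            {'name': 'gpu', 'num_total': 2, 'num_used': 1,
             'num_consumer': 1, 'running': {...serialized tasks...}}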
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
BugsInPy/BugsInPy/temp/projects/luigi/bug-14-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-14-buggy/luigi/luigi/scheduler.py
luigi-bug-9
# -*- coding: utf-8 -*- # # Copyright 2015-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module provide the function :py:func:`summary` that is used for printing an `execution summary <https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py>`_ at the end of luigi invocations. """ import textwrap import collections import functools import luigi class execution_summary(luigi.Config): summary_length = luigi.IntParameter(default=5) def _partition_tasks(worker): """ Takes a worker and sorts out tasks based on their status. Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker """ task_history = worker._add_task_history pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'} set_tasks = {} set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks} set_tasks["already_done"] = {task for (task, status, ext) in task_history if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]} set_tasks["failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'} set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'} set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["failed"] and task not in set_tasks["completed"] and not ext} set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["failed"] and task not in set_tasks["completed"] and ext} set_tasks["run_by_other_worker"] = set() set_tasks["upstream_failure"] = set() set_tasks["upstream_missing_dependency"] = set() set_tasks["upstream_run_by_other_worker"] = set() set_tasks["upstream_scheduling_error"] = set() set_tasks["not_run"] = set() return set_tasks def _root_task(worker): """ Return the first task scheduled by the worker, corresponding to the root task """ return worker._add_task_history[0][0] def _populate_unknown_statuses(set_tasks): """ Add the "upstream_*" and "not_run" statuses my mutating set_tasks. """ visited = set() for task in set_tasks["still_pending_not_ext"]: _depth_first_search(set_tasks, task, visited) def _depth_first_search(set_tasks, current_task, visited): """ This dfs checks why tasks are still pending. 
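    Tasks that finish the search with no upstream explanation (no failed,
    missing, or externally-run dependency) end up in "not_run": the scheduler
    simply never granted them permission to run.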
""" visited.add(current_task) if current_task in set_tasks["still_pending_not_ext"]: upstream_failure = False upstream_missing_dependency = False upstream_run_by_other_worker = False upstream_scheduling_error = False for task in current_task._requires(): if task not in visited: _depth_first_search(set_tasks, task, visited) if task in set_tasks["failed"] or task in set_tasks["upstream_failure"]: set_tasks["upstream_failure"].add(current_task) upstream_failure = True if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]: set_tasks["upstream_missing_dependency"].add(current_task) upstream_missing_dependency = True if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]: set_tasks["upstream_run_by_other_worker"].add(current_task) upstream_run_by_other_worker = True if task in set_tasks["scheduling_error"]: set_tasks["upstream_scheduling_error"].add(current_task) upstream_scheduling_error = True if not upstream_failure and not upstream_missing_dependency and \ not upstream_run_by_other_worker and not upstream_scheduling_error and \ current_task not in set_tasks["run_by_other_worker"]: set_tasks["not_run"].add(current_task) def _get_str(task_dict, extra_indent): """ This returns a string for each status """ summary_length = execution_summary().summary_length lines = [] task_names = sorted(task_dict.keys()) for task_family in task_names: tasks = task_dict[task_family] tasks = sorted(tasks, key=lambda x: str(x)) prefix_size = 8 if extra_indent else 4 prefix = ' ' * prefix_size line = None if summary_length > 0 and len(lines) >= summary_length: line = prefix + "..." lines.append(line) break if len(tasks[0].get_params()) == 0: line = prefix + '- {0} {1}()'.format(len(tasks), str(task_family)) elif _get_len_of_params(tasks[0]) > 60 or len(str(tasks[0])) > 200 or \ (len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)): """ This is to make sure that there is no really long task in the output """ line = prefix + '- {0} {1}(...)'.format(len(tasks), task_family) elif len((tasks[0].get_params())) == 1: attributes = {getattr(task, tasks[0].get_params()[0][0]) for task in tasks} param_class = tasks[0].get_params()[0][1] first, last = _ranging_attributes(attributes, param_class) if first is not None and last is not None and len(attributes) > 3: param_str = '{0}...{1}'.format(param_class.serialize(first), param_class.serialize(last)) else: param_str = '{0}'.format(_get_str_one_parameter(tasks)) line = prefix + '- {0} {1}({2}={3})'.format(len(tasks), task_family, tasks[0].get_params()[0][0], param_str) else: ranging = False params = _get_set_of_params(tasks) unique_param_keys = list(_get_unique_param_keys(params)) if len(unique_param_keys) == 1: unique_param, = unique_param_keys attributes = params[unique_param] param_class = unique_param[1] first, last = _ranging_attributes(attributes, param_class) if first is not None and last is not None and len(attributes) > 2: ranging = True line = prefix + '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(first, last, tasks, unique_param)) if not ranging: if len(tasks) == 1: line = prefix + '- {0} {1}'.format(len(tasks), tasks[0]) if len(tasks) == 2: line = prefix + '- {0} {1} and {2}'.format(len(tasks), tasks[0], tasks[1]) if len(tasks) > 2: line = prefix + '- {0} {1} ...'.format(len(tasks), tasks[0]) lines.append(line) return '\n'.join(lines) def _get_len_of_params(task): return 
sum(len(param[0]) for param in task.get_params()) def _get_str_ranging_multiple_parameters(first, last, tasks, unique_param): row = '' str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(first), unique_param[1].serialize(last)) for param in tasks[0].get_params(): row += '{0}='.format(param[0]) if param[0] == unique_param[0]: row += '{0}'.format(str_unique_param) else: row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0]))) if param != tasks[0].get_params()[-1]: row += ", " row += ')' return row def _get_set_of_params(tasks): params = {} for param in tasks[0].get_params(): params[param] = {getattr(task, param[0]) for task in tasks} return params def _get_unique_param_keys(params): for param_key, param_values in params.items(): if len(param_values) > 1: yield param_key def _ranging_attributes(attributes, param_class): """ Checks if there is a continuous range """ next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes} in_first = attributes.difference(next_attributes) in_second = next_attributes.difference(attributes) if len(in_first) == 1 and len(in_second) == 1: for x in attributes: if {param_class.next_in_enumeration(x)} == in_second: return next(iter(in_first)), x return None, None def _get_str_one_parameter(tasks): row = '' count = 0 for task in tasks: if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200: row += '...' break param = task.get_params()[0] row += '{0}'.format(param[1].serialize(getattr(task, param[0]))) if count < len(tasks) - 1: row += ',' count += 1 return row def _serialize_first_param(task): return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0])) def _get_number_of_tasks_for(status, group_tasks): if status == "still_pending": return (_get_number_of_tasks(group_tasks["still_pending_ext"]) + _get_number_of_tasks(group_tasks["still_pending_not_ext"])) return _get_number_of_tasks(group_tasks[status]) def _get_number_of_tasks(task_dict): return sum(len(tasks) for tasks in task_dict.values()) def _get_comments(group_tasks): """ Get the human readable comments and quantities for the task types. 
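    For example (illustrative), a run with two failed tasks contributes the
    entry {'failed': '* 2 failed:\n'}.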
""" comments = {} for status, human in _COMMENTS: num_tasks = _get_number_of_tasks_for(status, group_tasks) if num_tasks: space = " " if status in _PENDING_SUB_STATUSES else "" comments[status] = '{space}* {num_tasks} {human}:\n'.format( space=space, num_tasks=num_tasks, human=human) return comments # Oredered in the sense that they'll be printed in this order _ORDERED_STATUSES = ( "already_done", "completed", "failed", "scheduling_error", "still_pending", "still_pending_ext", "run_by_other_worker", "upstream_failure", "upstream_missing_dependency", "upstream_run_by_other_worker", "upstream_scheduling_error", "not_run", ) _PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):]) _COMMENTS = set(( ("already_done", 'present dependencies were encountered'), ("completed", 'ran successfully'), ("failed", 'failed'), ("scheduling_error", 'failed scheduling'), ("still_pending", 'were left pending, among these'), ("still_pending_ext", 'were missing external dependencies'), ("run_by_other_worker", 'were being run by another worker'), ("upstream_failure", 'had failed dependencies'), ("upstream_missing_dependency", 'had missing external dependencies'), ("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'), ("upstream_scheduling_error", 'had dependencies whose scheduling failed'), ("not_run", 'was not granted run permission by the scheduler'), )) def _get_run_by_other_worker(worker): """ This returns a set of the tasks that are being run by other worker """ task_sets = _get_external_workers(worker).values() return functools.reduce(lambda a, b: a | b, task_sets, set()) def _get_external_workers(worker): """ This returns a dict with a set of tasks for all of the other workers """ worker_that_blocked_task = collections.defaultdict(set) get_work_response_history = worker._get_work_response_history for get_work_response in get_work_response_history: if get_work_response['task_id'] is None: for running_task in get_work_response['running_tasks']: other_worker_id = running_task['worker'] other_task_id = running_task['task_id'] other_task = worker._scheduled_tasks.get(other_task_id) if other_worker_id == worker._id or not other_task: continue worker_that_blocked_task[other_worker_id].add(other_task) return worker_that_blocked_task def _group_tasks_by_name_and_status(task_dict): """ Takes a dictionary with sets of tasks grouped by their status and returns a dictionary with dictionaries with an array of tasks grouped by their status and task name """ group_status = {} for task in task_dict: if task.task_family not in group_status: group_status[task.task_family] = [] group_status[task.task_family].append(task) return group_status def _summary_dict(worker): set_tasks = _partition_tasks(worker) set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker) _populate_unknown_statuses(set_tasks) return set_tasks def _summary_format(set_tasks, worker): group_tasks = {} for status, task_dict in set_tasks.items(): group_tasks[status] = _group_tasks_by_name_and_status(task_dict) comments = _get_comments(group_tasks) num_all_tasks = sum([len(set_tasks["already_done"]), len(set_tasks["completed"]), len(set_tasks["failed"]), len(set_tasks["scheduling_error"]), len(set_tasks["still_pending_ext"]), len(set_tasks["still_pending_not_ext"])]) str_output = '' str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks) for status in _ORDERED_STATUSES: if status not in comments: continue str_output += '{0}'.format(comments[status]) if status != 
'still_pending': str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES)) ext_workers = _get_external_workers(worker) group_tasks_ext_workers = {} for ext_worker, task_dict in ext_workers.items(): group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict) if len(ext_workers) > 0: str_output += "\nThe other workers were:\n" count = 0 for ext_worker, task_dict in ext_workers.items(): if count > 3 and count < len(ext_workers) - 1: str_output += " and {0} other workers".format(len(ext_workers) - count) break str_output += " - {0} ran {1} tasks\n".format(ext_worker, len(task_dict)) count += 1 str_output += '\n' if num_all_tasks == sum([len(set_tasks["already_done"]), len(set_tasks["scheduling_error"]), len(set_tasks["still_pending_ext"]), len(set_tasks["still_pending_not_ext"])]): if len(ext_workers) == 0: str_output += '\n' str_output += 'Did not run any tasks' smiley = "" reason = "" if set_tasks["failed"]: smiley = ":(" reason = "there were failed tasks" if set_tasks["scheduling_error"]: reason += " and tasks whose scheduling failed" elif set_tasks["scheduling_error"]: smiley = ":(" reason = "there were tasks whose scheduling failed" elif set_tasks["not_run"]: smiley = ":|" reason = "there were tasks that were not granted run permission by the scheduler" elif set_tasks["still_pending_ext"]: smiley = ":|" reason = "there were missing external dependencies" else: smiley = ":)" reason = "there were no failed tasks or missing external dependencies" str_output += "\nThis progress looks {0} because {1}".format(smiley, reason) if num_all_tasks == 0: str_output = 'Did not schedule any tasks' return str_output def _summary_wrap(str_output): return textwrap.dedent(""" ===== Luigi Execution Summary ===== {str_output} ===== Luigi Execution Summary ===== """).format(str_output=str_output) def summary(worker): """ Given a worker, return a human readable summary of what the worker have done. """ return _summary_wrap(_summary_format(_summary_dict(worker), worker)) # 5 # -*- coding: utf-8 -*- # # Copyright 2015-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module provide the function :py:func:`summary` that is used for printing an `execution summary <https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py>`_ at the end of luigi invocations. """ import textwrap import collections import functools import luigi class execution_summary(luigi.Config): summary_length = luigi.IntParameter(default=5) def _partition_tasks(worker): """ Takes a worker and sorts out tasks based on their status. 
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker """ task_history = worker._add_task_history pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'} set_tasks = {} set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks} set_tasks["already_done"] = {task for (task, status, ext) in task_history if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]} set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'} set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"] set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'} set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext} set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext} set_tasks["run_by_other_worker"] = set() set_tasks["upstream_failure"] = set() set_tasks["upstream_missing_dependency"] = set() set_tasks["upstream_run_by_other_worker"] = set() set_tasks["upstream_scheduling_error"] = set() set_tasks["not_run"] = set() return set_tasks def _root_task(worker): """ Return the first task scheduled by the worker, corresponding to the root task """ return worker._add_task_history[0][0] def _populate_unknown_statuses(set_tasks): """ Add the "upstream_*" and "not_run" statuses my mutating set_tasks. """ visited = set() for task in set_tasks["still_pending_not_ext"]: _depth_first_search(set_tasks, task, visited) def _depth_first_search(set_tasks, current_task, visited): """ This dfs checks why tasks are still pending. 
""" visited.add(current_task) if current_task in set_tasks["still_pending_not_ext"]: upstream_failure = False upstream_missing_dependency = False upstream_run_by_other_worker = False upstream_scheduling_error = False for task in current_task._requires(): if task not in visited: _depth_first_search(set_tasks, task, visited) if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]: set_tasks["upstream_failure"].add(current_task) upstream_failure = True if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]: set_tasks["upstream_missing_dependency"].add(current_task) upstream_missing_dependency = True if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]: set_tasks["upstream_run_by_other_worker"].add(current_task) upstream_run_by_other_worker = True if task in set_tasks["scheduling_error"]: set_tasks["upstream_scheduling_error"].add(current_task) upstream_scheduling_error = True if not upstream_failure and not upstream_missing_dependency and \ not upstream_run_by_other_worker and not upstream_scheduling_error and \ current_task not in set_tasks["run_by_other_worker"]: set_tasks["not_run"].add(current_task) def _get_str(task_dict, extra_indent): """ This returns a string for each status """ summary_length = execution_summary().summary_length lines = [] task_names = sorted(task_dict.keys()) for task_family in task_names: tasks = task_dict[task_family] tasks = sorted(tasks, key=lambda x: str(x)) prefix_size = 8 if extra_indent else 4 prefix = ' ' * prefix_size line = None if summary_length > 0 and len(lines) >= summary_length: line = prefix + "..." lines.append(line) break if len(tasks[0].get_params()) == 0: line = prefix + '- {0} {1}()'.format(len(tasks), str(task_family)) elif _get_len_of_params(tasks[0]) > 60 or len(str(tasks[0])) > 200 or \ (len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)): """ This is to make sure that there is no really long task in the output """ line = prefix + '- {0} {1}(...)'.format(len(tasks), task_family) elif len((tasks[0].get_params())) == 1: attributes = {getattr(task, tasks[0].get_params()[0][0]) for task in tasks} param_class = tasks[0].get_params()[0][1] first, last = _ranging_attributes(attributes, param_class) if first is not None and last is not None and len(attributes) > 3: param_str = '{0}...{1}'.format(param_class.serialize(first), param_class.serialize(last)) else: param_str = '{0}'.format(_get_str_one_parameter(tasks)) line = prefix + '- {0} {1}({2}={3})'.format(len(tasks), task_family, tasks[0].get_params()[0][0], param_str) else: ranging = False params = _get_set_of_params(tasks) unique_param_keys = list(_get_unique_param_keys(params)) if len(unique_param_keys) == 1: unique_param, = unique_param_keys attributes = params[unique_param] param_class = unique_param[1] first, last = _ranging_attributes(attributes, param_class) if first is not None and last is not None and len(attributes) > 2: ranging = True line = prefix + '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(first, last, tasks, unique_param)) if not ranging: if len(tasks) == 1: line = prefix + '- {0} {1}'.format(len(tasks), tasks[0]) if len(tasks) == 2: line = prefix + '- {0} {1} and {2}'.format(len(tasks), tasks[0], tasks[1]) if len(tasks) > 2: line = prefix + '- {0} {1} ...'.format(len(tasks), tasks[0]) lines.append(line) return '\n'.join(lines) def _get_len_of_params(task): return 
sum(len(param[0]) for param in task.get_params()) def _get_str_ranging_multiple_parameters(first, last, tasks, unique_param): row = '' str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(first), unique_param[1].serialize(last)) for param in tasks[0].get_params(): row += '{0}='.format(param[0]) if param[0] == unique_param[0]: row += '{0}'.format(str_unique_param) else: row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0]))) if param != tasks[0].get_params()[-1]: row += ", " row += ')' return row def _get_set_of_params(tasks): params = {} for param in tasks[0].get_params(): params[param] = {getattr(task, param[0]) for task in tasks} return params def _get_unique_param_keys(params): for param_key, param_values in params.items(): if len(param_values) > 1: yield param_key def _ranging_attributes(attributes, param_class): """ Checks if there is a continuous range """ next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes} in_first = attributes.difference(next_attributes) in_second = next_attributes.difference(attributes) if len(in_first) == 1 and len(in_second) == 1: for x in attributes: if {param_class.next_in_enumeration(x)} == in_second: return next(iter(in_first)), x return None, None def _get_str_one_parameter(tasks): row = '' count = 0 for task in tasks: if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200: row += '...' break param = task.get_params()[0] row += '{0}'.format(param[1].serialize(getattr(task, param[0]))) if count < len(tasks) - 1: row += ',' count += 1 return row def _serialize_first_param(task): return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0])) def _get_number_of_tasks_for(status, group_tasks): if status == "still_pending": return (_get_number_of_tasks(group_tasks["still_pending_ext"]) + _get_number_of_tasks(group_tasks["still_pending_not_ext"])) return _get_number_of_tasks(group_tasks[status]) def _get_number_of_tasks(task_dict): return sum(len(tasks) for tasks in task_dict.values()) def _get_comments(group_tasks): """ Get the human readable comments and quantities for the task types. 
""" comments = {} for status, human in _COMMENTS: num_tasks = _get_number_of_tasks_for(status, group_tasks) if num_tasks: space = " " if status in _PENDING_SUB_STATUSES else "" comments[status] = '{space}* {num_tasks} {human}:\n'.format( space=space, num_tasks=num_tasks, human=human) return comments # Oredered in the sense that they'll be printed in this order _ORDERED_STATUSES = ( "already_done", "completed", "ever_failed", "failed", "scheduling_error", "still_pending", "still_pending_ext", "run_by_other_worker", "upstream_failure", "upstream_missing_dependency", "upstream_run_by_other_worker", "upstream_scheduling_error", "not_run", ) _PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):]) _COMMENTS = set(( ("already_done", 'present dependencies were encountered'), ("completed", 'ran successfully'), ("failed", 'failed'), ("scheduling_error", 'failed scheduling'), ("still_pending", 'were left pending, among these'), ("still_pending_ext", 'were missing external dependencies'), ("run_by_other_worker", 'were being run by another worker'), ("upstream_failure", 'had failed dependencies'), ("upstream_missing_dependency", 'had missing external dependencies'), ("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'), ("upstream_scheduling_error", 'had dependencies whose scheduling failed'), ("not_run", 'was not granted run permission by the scheduler'), )) def _get_run_by_other_worker(worker): """ This returns a set of the tasks that are being run by other worker """ task_sets = _get_external_workers(worker).values() return functools.reduce(lambda a, b: a | b, task_sets, set()) def _get_external_workers(worker): """ This returns a dict with a set of tasks for all of the other workers """ worker_that_blocked_task = collections.defaultdict(set) get_work_response_history = worker._get_work_response_history for get_work_response in get_work_response_history: if get_work_response['task_id'] is None: for running_task in get_work_response['running_tasks']: other_worker_id = running_task['worker'] other_task_id = running_task['task_id'] other_task = worker._scheduled_tasks.get(other_task_id) if other_worker_id == worker._id or not other_task: continue worker_that_blocked_task[other_worker_id].add(other_task) return worker_that_blocked_task def _group_tasks_by_name_and_status(task_dict): """ Takes a dictionary with sets of tasks grouped by their status and returns a dictionary with dictionaries with an array of tasks grouped by their status and task name """ group_status = {} for task in task_dict: if task.task_family not in group_status: group_status[task.task_family] = [] group_status[task.task_family].append(task) return group_status def _summary_dict(worker): set_tasks = _partition_tasks(worker) set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker) _populate_unknown_statuses(set_tasks) return set_tasks def _summary_format(set_tasks, worker): group_tasks = {} for status, task_dict in set_tasks.items(): group_tasks[status] = _group_tasks_by_name_and_status(task_dict) comments = _get_comments(group_tasks) num_all_tasks = sum([len(set_tasks["already_done"]), len(set_tasks["completed"]), len(set_tasks["failed"]), len(set_tasks["scheduling_error"]), len(set_tasks["still_pending_ext"]), len(set_tasks["still_pending_not_ext"])]) str_output = '' str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks) for status in _ORDERED_STATUSES: if status not in comments: continue str_output += '{0}'.format(comments[status]) 
if status != 'still_pending': str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES)) ext_workers = _get_external_workers(worker) group_tasks_ext_workers = {} for ext_worker, task_dict in ext_workers.items(): group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict) if len(ext_workers) > 0: str_output += "\nThe other workers were:\n" count = 0 for ext_worker, task_dict in ext_workers.items(): if count > 3 and count < len(ext_workers) - 1: str_output += " and {0} other workers".format(len(ext_workers) - count) break str_output += " - {0} ran {1} tasks\n".format(ext_worker, len(task_dict)) count += 1 str_output += '\n' if num_all_tasks == sum([len(set_tasks["already_done"]), len(set_tasks["scheduling_error"]), len(set_tasks["still_pending_ext"]), len(set_tasks["still_pending_not_ext"])]): if len(ext_workers) == 0: str_output += '\n' str_output += 'Did not run any tasks' smiley = "" reason = "" if set_tasks["ever_failed"]: if not set_tasks["failed"]: smiley = ":)" reason = "there were failed tasks but they all succeeded in a retry" else: smiley = ":(" reason = "there were failed tasks" if set_tasks["scheduling_error"]: reason += " and tasks whose scheduling failed" elif set_tasks["scheduling_error"]: smiley = ":(" reason = "there were tasks whose scheduling failed" elif set_tasks["not_run"]: smiley = ":|" reason = "there were tasks that were not granted run permission by the scheduler" elif set_tasks["still_pending_ext"]: smiley = ":|" reason = "there were missing external dependencies" else: smiley = ":)" reason = "there were no failed tasks or missing external dependencies" str_output += "\nThis progress looks {0} because {1}".format(smiley, reason) if num_all_tasks == 0: str_output = 'Did not schedule any tasks' return str_output def _summary_wrap(str_output): return textwrap.dedent(""" ===== Luigi Execution Summary ===== {str_output} ===== Luigi Execution Summary ===== """).format(str_output=str_output) def summary(worker): """ Given a worker, return a human-readable summary of what the worker has done. """ return _summary_wrap(_summary_format(_summary_dict(worker), worker))
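The range detection in _ranging_attributes above rests on a set-difference trick: the only element of the set with no predecessor is the range start, and the only successor missing from the set marks the range end. A minimal self-contained sketch of the same logic over plain integers; here next_ stands in for param_class.next_in_enumeration, and the names are illustrative, not part of the luigi API:

def ranging_attributes(attributes, next_):
    # Successors of every element in the set.
    next_attributes = {next_(a) for a in attributes}
    in_first = attributes - next_attributes   # candidate range start: has no predecessor
    in_second = next_attributes - attributes  # successor of the candidate range end
    if len(in_first) == 1 and len(in_second) == 1:
        for x in attributes:
            if {next_(x)} == in_second:
                return next(iter(in_first)), x
    return None, None

print(ranging_attributes({3, 4, 5, 6}, lambda x: x + 1))  # (3, 6): contiguous range
print(ranging_attributes({3, 5, 6}, lambda x: x + 1))     # (None, None): there is a gap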
BugsInPy/BugsInPy/temp/projects/luigi/bug-9-fixed/luigi/luigi/execution_summary.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-9-buggy/luigi/luigi/execution_summary.py
luigi-bug-26
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Provides functionality to run a Hadoop job using a Jar """ import logging import os import random import luigi.contrib.hadoop import luigi.contrib.hdfs logger = logging.getLogger('luigi-interface') def fix_paths(job): """ Coerce input arguments to use temporary files when used for output. Return a list of temporary file pairs (tmpfile, destination path) and a list of arguments. Converts each HdfsTarget to a string for the path. """ tmp_files = [] args = [] for x in job.args(): if isinstance(x, luigi.contrib.hdfs.HdfsTarget): # input/output if x.exists() or not job.atomic_output(): # input args.append(x.path) else: # output x_path_no_slash = x.path[:-1] if x.path[-1] == '/' else x.path y = luigi.contrib.hdfs.HdfsTarget(x_path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10)) tmp_files.append((y, x_path_no_slash)) logger.info('Using temp path: %s for path %s', y.path, x.path) args.append(y.path) else: args.append(str(x)) return (tmp_files, args) class HadoopJarJobError(Exception): pass class HadoopJarJobRunner(luigi.contrib.hadoop.JobRunner): """ JobRunner for `hadoop jar` commands. Used to run a HadoopJarJobTask. """ def __init__(self): pass def run_job(self, job): ssh_config = job.ssh() if ssh_config: host = ssh_config.get("host", None) key_file = ssh_config.get("key_file", None) username = ssh_config.get("username", None) if not host or not key_file or not username or not job.jar(): raise HadoopJarJobError("missing some config for HadoopRemoteJarJobRunner") arglist = ['ssh', '-i', key_file, '-o', 'BatchMode=yes'] # no password prompts etc if ssh_config.get("no_host_key_check", False): arglist += ['-o', 'UserKnownHostsFile=/dev/null', '-o', 'StrictHostKeyChecking=no'] arglist.append('{}@{}'.format(username, host)) else: arglist = [] if not job.jar() or not os.path.exists(job.jar()): logger.error("Can't find jar: %s, full path %s", job.jar(), os.path.abspath(job.jar())) raise HadoopJarJobError("job jar does not exist") # TODO(jcrobak): libjars, files, etc. Can refactor out of # hadoop.HadoopJobRunner hadoop_arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', job.jar()] if job.main(): hadoop_arglist.append(job.main()) jobconfs = job.jobconfs() for jc in jobconfs: hadoop_arglist += ['-D' + jc] (tmp_files, job_args) = fix_paths(job) hadoop_arglist += job_args arglist.extend(hadoop_arglist) luigi.contrib.hadoop.run_and_track_hadoop_job(arglist) for a, b in tmp_files: a.move(b) class HadoopJarJobTask(luigi.contrib.hadoop.BaseHadoopJobTask): """ A job task for `hadoop jar` commands that define a jar and (optional) main method. """ def jar(self): """ Path to the jar for this Hadoop Job. """ return None def main(self): """ optional main method for this Hadoop Job. 
""" return None def job_runner(self): # We recommend that you define a subclass, override this method and set up your own config return HadoopJarJobRunner() def atomic_output(self): """ If True, then rewrite output arguments to be temp locations and atomically move them into place after the job finishes. """ return True def ssh(self): """ Set this to run hadoop command remotely via ssh. It needs to be a dict that looks like {"host": "myhost", "key_file": None, "username": None, ["no_host_key_check": False]} """ return None def args(self): """ Returns an array of args to pass to the job (after hadoop jar <jar> <main>). """ return [] # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Provides functionality to run a Hadoop job using a Jar """ import logging import os import random import luigi.contrib.hadoop import luigi.contrib.hdfs logger = logging.getLogger('luigi-interface') def fix_paths(job): """ Coerce input arguments to use temporary files when used for output. Return a list of temporary file pairs (tmpfile, destination path) and a list of arguments. Converts each HdfsTarget to a string for the path. """ tmp_files = [] args = [] for x in job.args(): if isinstance(x, luigi.contrib.hdfs.HdfsTarget): # input/output if x.exists() or not job.atomic_output(): # input args.append(x.path) else: # output x_path_no_slash = x.path[:-1] if x.path[-1] == '/' else x.path y = luigi.contrib.hdfs.HdfsTarget(x_path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10)) tmp_files.append((y, x_path_no_slash)) logger.info('Using temp path: %s for path %s', y.path, x.path) args.append(y.path) else: args.append(str(x)) return (tmp_files, args) class HadoopJarJobError(Exception): pass class HadoopJarJobRunner(luigi.contrib.hadoop.JobRunner): """ JobRunner for `hadoop jar` commands. Used to run a HadoopJarJobTask. """ def __init__(self): pass def run_job(self, job): ssh_config = job.ssh() if ssh_config: host = ssh_config.get("host", None) key_file = ssh_config.get("key_file", None) username = ssh_config.get("username", None) if not host or not key_file or not username or not job.jar(): raise HadoopJarJobError("missing some config for HadoopRemoteJarJobRunner") arglist = ['ssh', '-i', key_file, '-o', 'BatchMode=yes'] # no password prompts etc if ssh_config.get("no_host_key_check", False): arglist += ['-o', 'UserKnownHostsFile=/dev/null', '-o', 'StrictHostKeyChecking=no'] arglist.append('{}@{}'.format(username, host)) else: arglist = [] if not job.jar(): raise HadoopJarJobError("Jar not defined") if not os.path.exists(job.jar()): logger.error("Can't find jar: %s, full path %s", job.jar(), os.path.abspath(job.jar())) raise HadoopJarJobError("job jar does not exist") # TODO(jcrobak): libjars, files, etc. 
Can refactor out of # hadoop.HadoopJobRunner hadoop_arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', job.jar()] if job.main(): hadoop_arglist.append(job.main()) jobconfs = job.jobconfs() for jc in jobconfs: hadoop_arglist += ['-D' + jc] (tmp_files, job_args) = fix_paths(job) hadoop_arglist += job_args arglist.extend(hadoop_arglist) luigi.contrib.hadoop.run_and_track_hadoop_job(arglist) for a, b in tmp_files: a.move(b) class HadoopJarJobTask(luigi.contrib.hadoop.BaseHadoopJobTask): """ A job task for `hadoop jar` commands that define a jar and (optional) main method. """ def jar(self): """ Path to the jar for this Hadoop Job. """ return None def main(self): """ optional main method for this Hadoop Job. """ return None def job_runner(self): # We recommend that you define a subclass, override this method and set up your own config return HadoopJarJobRunner() def atomic_output(self): """ If True, then rewrite output arguments to be temp locations and atomically move them into place after the job finishes. """ return True def ssh(self): """ Set this to run hadoop command remotely via ssh. It needs to be a dict that looks like {"host": "myhost", "key_file": None, "username": None, ["no_host_key_check": False]} """ return None def args(self): """ Returns an array of args to pass to the job (after hadoop jar <jar> <main>). """ return []
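For context, the two copies of hadoop_jar.py above differ only in run_job's jar check: one folds the missing-jar and nonexistent-jar cases into a single branch whose logging call evaluates os.path.abspath(job.jar()) and errors out confusingly when jar() returns None, while the other raises HadoopJarJobError("Jar not defined") before touching the filesystem. As a usage illustration of the hooks the class exposes, here is a hypothetical subclass; the jar path, main class, and HDFS locations are invented:

import luigi
import luigi.contrib.hdfs
from luigi.contrib.hadoop_jar import HadoopJarJobTask


class WordCount(HadoopJarJobTask):

    def jar(self):
        # run_job() fails fast if this returns None or a missing file
        return '/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'

    def main(self):
        return 'wordcount'

    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount-out')

    def args(self):
        # fix_paths() rewrites the HdfsTarget output to a -luigi-tmp-* path
        # and moves it into place after the job succeeds, because
        # atomic_output() defaults to True
        return ['/tmp/wordcount-in', self.output()]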
BugsInPy/BugsInPy/temp/projects/luigi/bug-26-fixed/luigi/luigi/contrib/hadoop_jar.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-26-buggy/luigi/luigi/contrib/hadoop_jar.py
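To run the jar on a remote gateway instead, the ssh() hook must return a dict with the keys run_job checks for. Continuing the hypothetical WordCount sketch above, with all values invented:

class RemoteWordCount(WordCount):

    def ssh(self):
        # host, key_file and username are all required by run_job();
        # no_host_key_check is optional and defaults to False
        return {
            'host': 'gateway.example.com',
            'key_file': '/home/etl/.ssh/id_rsa',
            'username': 'etl',
            'no_host_key_check': True,
        }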
luigi-bug-33
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc try: from itertools import imap as map except ImportError: pass import logging import traceback import warnings from luigi import six from luigi import parameter Parameter = parameter.Parameter logger = logging.getLogger('luigi-interface') def namespace(namespace=None): """ Call to set namespace of tasks declared after the call. If called without arguments or with ``None`` as the namespace, the namespace is reset, which is recommended to do at the end of any file where the namespace is set to avoid unintentionally setting namespace on tasks outside of the scope of the current file. """ Register._default_namespace = namespace def id_to_name_and_params(task_id): # DEPRECATED import luigi.tools.parse_task return luigi.tools.parse_task.id_to_name_and_params(task_id) class Register(abc.ABCMeta): """ The Metaclass of :py:class:`Task`. Acts as a global registry of Tasks with the following properties: 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the same object. 2. Keep track of all subclasses of :py:class:`Task` and expose them. """ __instance_cache = {} _default_namespace = None _reg = [] AMBIGUOUS_CLASS = object() # Placeholder denoting an error """If this value is returned by :py:meth:`get_reg` then there is an ambiguous task name (two :py:class:`Task` have the same name). This denotes an error.""" def __new__(metacls, classname, bases, classdict): """ Custom class creation for namespacing. Also register all subclasses. Set the task namespace to whatever the currently declared namespace is. """ if "task_namespace" not in classdict: classdict["task_namespace"] = metacls._default_namespace cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): """ Custom class instantiation utilizing instance cache. If a Task has already been instantiated with the same parameters, the previous instance is returned to reduce number of object instances. """ def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = Register.__instance_cache if h is None: # disabled return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() # unhashable types in parameters if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(cls): """ Clear/Reset the instance cache. """ Register.__instance_cache = {} @classmethod def disable_instance_cache(cls): """ Disables the instance cache. """ Register.__instance_cache = None @property def task_family(cls): """ The task family for the given class. If ``cls.task_namespace is None`` then it's the name of the class. Otherwise, ``<task_namespace>.`` is prefixed to the class name. 
""" if cls.task_namespace is None: return cls.__name__ else: return "%s.%s" % (cls.task_namespace, cls.__name__) @classmethod def get_reg(cls, include_config_without_section=False): """Return all of the registery classes. :return: a ``dict`` of task_family -> class """ # We have to do this on-demand in case task names have changed later reg = {} for cls in cls._reg: if cls.run == NotImplemented: continue if issubclass(cls, ConfigWithoutSection) and not include_config_without_section: continue name = cls.task_family if name in reg and reg[name] != cls and \ reg[name] != cls.AMBIGUOUS_CLASS and \ not issubclass(cls, reg[name]): # Registering two different classes - this means we can't instantiate them by name # The only exception is if one class is a subclass of the other. In that case, we # instantiate the most-derived class (this fixes some issues with decorator wrappers). reg[name] = cls.AMBIGUOUS_CLASS else: reg[name] = cls return reg @classmethod def tasks_str(cls): """ Human-readable register contents dump. """ return repr(sorted(Register.get_reg().keys())) @classmethod def get_task_cls(cls, name): """ Returns an unambiguous class or raises an exception. """ task_cls = Register.get_reg().get(name) if not task_cls: raise Exception('Task %r not found. Candidates are: %s' % (name, Register.tasks_str())) if task_cls == Register.AMBIGUOUS_CLASS: raise Exception('Task %r is ambiguous' % name) return task_cls @classmethod def get_all_params(cls): """ Compiles and returns all parameters for all :py:class:`Task`. :return: a ``dict`` of parameter name -> parameter. """ for task_name, task_cls in six.iteritems(cls.get_reg(include_config_without_section=True)): if task_cls == cls.AMBIGUOUS_CLASS: continue for param_name, param_obj in task_cls.get_params(): yield task_name, issubclass(task_cls, ConfigWithoutSection), param_name, param_obj @six.add_metaclass(Register) class Task(object): """ This is the base class of all Luigi Tasks, the base unit of work in Luigi. A Luigi Task describes a unit or work. The key methods of a Task, which must be implemented in a subclass are: * :py:meth:`run` - the computation done by this task. * :py:meth:`requires` - the list of Tasks that this Task depends on. * :py:meth:`output` - the output :py:class:`Target` that this Task creates. Parameters to the Task should be declared as members of the class, e.g.:: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() Each Task exposes a constructor accepting all :py:class:`Parameter` (and values) as kwargs. e.g. ``MyTask(count=10)`` would instantiate `MyTask`. In addition to any declared properties and methods, there are a few non-declared properties, which are created by the :py:class:`Register` metaclass: ``Task.task_namespace`` optional string which is prepended to the task name for the sake of scheduling. If it isn't overridden in a Task, whatever was last declared using `luigi.namespace` will be used. ``Task._parameters`` list of ``(parameter_name, parameter)`` tuples for this task class """ _event_callbacks = {} # Priority of the task: the scheduler should favor available # tasks with higher priority values first. priority = 0 disabled = False # Resources used by the task. Should be formatted like {"scp": 1} to indicate that the # task requires 1 unit of the scp resource. resources = {} # Number of seconds after which to time out the run function. No timeout if set to 0. 
Defaults # to 0 or value in client.cfg worker_timeout = None @classmethod def event_handler(cls, event): """ Decorator for adding event handlers. """ def wrapped(callback): cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback) return callback return wrapped def trigger_event(self, event, *args, **kwargs): """ Trigger that calls all of the specified events associated with this class. """ for event_class, event_callbacks in six.iteritems(self._event_callbacks): if not isinstance(self, event_class): continue for callback in event_callbacks.get(event, []): try: # callbacks are protected callback(*args, **kwargs) except KeyboardInterrupt: return except BaseException: logger.exception("Error in event callback for %r", event) @property def task_module(self): # Returns what Python module to import to get access to this class # TODO(erikbern): we should think about a language-agnostic mechanism return self.__class__.__module__ @property def task_family(self): """ Convenience method since a property on the metaclass isn't directly accessible through the class instances. """ return self.__class__.task_family @classmethod def get_params(cls): """ Returns all of the Parameters for this Task. """ # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically params = [] for param_name in dir(cls): param_obj = getattr(cls, param_name) if not isinstance(param_obj, Parameter): continue params.append((param_name, param_obj)) # The order the parameters are created matters. See Parameter class params.sort(key=lambda t: t[1].counter) return params @classmethod def get_param_values(cls, params, args, kwargs): """ Get the values of the parameters from the args and kwargs. :param params: list of (param_name, Parameter). :param args: positional arguments :param kwargs: keyword arguments. :returns: list of `(name, value)` tuples, one for each parameter. """ result = {} params_dict = dict(params) task_name = cls.task_family # In case any exceptions are thrown, create a helpful description of how the Task was invoked # TODO: should we detect non-reprable arguments? 
These will lead to mysterious errors exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs) # Fill in the positional arguments positional_params = [(n, p) for n, p in params if p.significant] for i, arg in enumerate(args): if i >= len(positional_params): raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args))) param_name, param_obj = positional_params[i] result[param_name] = arg # Then the optional arguments for param_name, arg in six.iteritems(kwargs): if param_name in result: raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name)) if param_name not in params_dict: raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name)) result[param_name] = arg # Then use the defaults for anything not filled in for param_name, param_obj in params: if param_name not in result: if not param_obj.has_task_value(task_name, param_name): raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name)) result[param_name] = param_obj.task_value(task_name, param_name) def list_to_tuple(x): """ Make tuples out of lists and sets to allow hashing """ if isinstance(x, list) or isinstance(x, set): return tuple(x) else: return x # Sort it by the correct order and make a list return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params] def __init__(self, *args, **kwargs): """ Constructor to resolve values for all Parameters. For example, the Task: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() can be instantiated as ``MyTask(count=10)``. """ params = self.get_params() param_values = self.get_param_values(params, args, kwargs) # Set all values on class instance for key, value in param_values: setattr(self, key, value) # Register args and kwargs as an attribute on the class. Might be useful self.param_args = tuple(value for key, value in param_values) self.param_kwargs = dict(param_values) # Build up task id task_id_parts = [] param_objs = dict(params) for param_name, param_value in param_values: if dict(params)[param_name].significant: task_id_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value))) self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts)) self.__hash = hash(self.task_id) def initialized(self): """ Returns ``True`` if the Task is initialized and ``False`` otherwise. """ return hasattr(self, 'task_id') @classmethod def from_str_params(cls, params_str=None): """ Creates an instance from a str->str hash. :param params_str: dict of param name -> value. """ if params_str is None: params_str = {} kwargs = {} for param_name, param in cls.get_params(): value = param.parse_from_input(param_name, params_str[param_name]) kwargs[param_name] = value return cls(**kwargs) def to_str_params(self): # Convert all parameters to a str->str hash params_str = {} params = dict(self.get_params()) for param_name, param_value in six.iteritems(self.param_kwargs): params_str[param_name] = params[param_name].serialize(param_value) return params_str def clone(self, cls=None, **kwargs): """ Creates a new instance from an existing instance where some of the args have changed. 
There's at least two scenarios where this is useful (see test/clone_test.py): * remove a lot of boiler plate when you have recursive dependencies and lots of args * there's task inheritance and some logic is on the base class :param cls: :param kwargs: :return: """ k = self.param_kwargs.copy() k.update(six.iteritems(kwargs)) if cls is None: cls = self.__class__ new_k = {} for param_name, param_class in cls.get_params(): if param_name in k: new_k[param_name] = k[param_name] return cls(**new_k) def __hash__(self): return self.__hash def __repr__(self): return self.task_id def __eq__(self, other): return self.__class__ == other.__class__ and self.param_args == other.param_args def complete(self): """ If the task has any outputs, return ``True`` if all outputs exists. Otherwise, return ``False``. However, you may freely override this method with custom logic. """ outputs = flatten(self.output()) if len(outputs) == 0: warnings.warn( "Task %r without outputs has no custom complete() method" % self, stacklevel=2 ) return False return all(map(lambda output: output.exists(), outputs)) @classmethod def bulk_complete(cls, parameter_tuples): """ Returns those of parameter_tuples for which this Task is complete. Override (with an efficient implementation) for efficient scheduling with range tools. Keep the logic consistent with that of complete(). """ raise NotImplementedError def output(self): """ The output that this Task produces. The output of the Task determines if the Task needs to be run--the task is considered finished iff the outputs all exist. Subclasses should override this method to return a single :py:class:`Target` or a list of :py:class:`Target` instances. Implementation note If running multiple workers, the output must be a resource that is accessible by all workers, such as a DFS or database. Otherwise, workers might compute the same output since they don't see the work done by other workers. """ return [] # default impl def requires(self): """ The Tasks that this Task depends on. A Task will only run if all of the Tasks that it requires are completed. If your Task does not require any other Tasks, then you don't need to override this method. Otherwise, a Subclasses can override this method to return a single Task, a list of Task instances, or a dict whose values are Task instances. """ return [] # default impl def _requires(self): """ Override in "template" tasks which themselves are supposed to be subclassed and thus have their requires() overridden (name preserved to provide consistent end-user experience), yet need to introduce (non-input) dependencies. Must return an iterable which among others contains the _requires() of the superclass. """ return flatten(self.requires()) # base impl def process_resources(self): """ Override in "template" tasks which provide common resource functionality but allow subclasses to specify additional resources while preserving the name for consistent end-user experience. """ return self.resources # default impl def input(self): """ Returns the outputs of the Tasks returned by :py:meth:`requires` :return: a list of :py:class:`Target` objects which are specified as outputs of all required Tasks. """ return getpaths(self.requires()) def deps(self): """ Internal method used by the scheduler. Returns the flattened list of requires. """ # used by scheduler return flatten(self._requires()) def run(self): """ The task run method, to be overridden in a subclass. """ pass # default impl def on_failure(self, exception): """ Override for custom error handling. 
This method gets called if an exception is raised in :py:meth:`run`. Return value of this method is json encoded and sent to the scheduler as the `expl` argument. Its string representation will be used as the body of the error email sent out if any. Default behavior is to return a string representation of the stack trace. """ traceback_string = traceback.format_exc() return "Runtime error:\n%s" % traceback_string def on_success(self): """ Override for doing custom completion handling for a larger class of tasks. This method gets called when :py:meth:`run` completes without raising any exceptions. The returned value is json encoded and sent to the scheduler as the `expl` argument. Default behavior is to send a None value""" pass class MixinNaiveBulkComplete(object): """ Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop. Applicable to tasks whose completeness checking is cheap. This doesn't exploit output location specific APIs for speed advantage, nevertheless removes redundant scheduler roundtrips. """ @classmethod def bulk_complete(cls, parameter_tuples): return [t for t in parameter_tuples if cls(t).complete()] def externalize(task): """ Returns an externalized version of the Task. See :py:class:`ExternalTask`. """ task.run = NotImplemented return task class ExternalTask(Task): """ Subclass for references to external dependencies. An ExternalTask does not have a `run` implementation, which signifies to the framework that this Task's :py:meth:`output` is generated outside of Luigi. """ run = NotImplemented class WrapperTask(Task): """ Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist. """ def complete(self): return all(r.complete() for r in flatten(self.requires())) class Config(Task): """Used for configuration that's not specific to a certain task TODO: let's refactor Task & Config so that it inherits from a common ParamContainer base class """ pass class ConfigWithoutSection(Task): """Used for configuration that doesn't have a particular section (e.g. --n-workers) """ pass def getpaths(struct): """ Maps all Tasks in a structured data object to their .output(). """ if isinstance(struct, Task): return struct.output() elif isinstance(struct, dict): r = {} for k, v in six.iteritems(struct): r[k] = getpaths(v) return r else: # Remaining case: assume r is iterable... try: s = list(struct) except TypeError: raise Exception('Cannot map %s to Task/dict/list' % str(struct)) return [getpaths(r) for r in s] def flatten(struct): """ Creates a flat list of all items in structured output (dicts, lists, items): .. code-block:: python >>> flatten({'a': 'foo', 'b': 'bar'}) ['foo', 'bar'] >>> flatten(['foo', ['bar', 'troll']]) ['foo', 'bar', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42] """ if struct is None: return [] flat = [] if isinstance(struct, dict): for _, result in six.iteritems(struct): flat += flatten(result) return flat if isinstance(struct, six.string_types): return [struct] try: # if iterable for result in struct: flat += flatten(result) return flat except TypeError: pass return [struct] def flatten_output(task): """ Lists all output targets by recursively walking output-less (wrapper) tasks. FIXME order consistently. 
""" r = flatten(task.output()) if not r: for dep in flatten(task.requires()): r += flatten_output(dep) return r # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc try: from itertools import imap as map except ImportError: pass import logging import traceback import warnings from luigi import six from luigi import parameter Parameter = parameter.Parameter logger = logging.getLogger('luigi-interface') def namespace(namespace=None): """ Call to set namespace of tasks declared after the call. If called without arguments or with ``None`` as the namespace, the namespace is reset, which is recommended to do at the end of any file where the namespace is set to avoid unintentionally setting namespace on tasks outside of the scope of the current file. """ Register._default_namespace = namespace def id_to_name_and_params(task_id): # DEPRECATED import luigi.tools.parse_task return luigi.tools.parse_task.id_to_name_and_params(task_id) class Register(abc.ABCMeta): """ The Metaclass of :py:class:`Task`. Acts as a global registry of Tasks with the following properties: 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the same object. 2. Keep track of all subclasses of :py:class:`Task` and expose them. """ __instance_cache = {} _default_namespace = None _reg = [] AMBIGUOUS_CLASS = object() # Placeholder denoting an error """If this value is returned by :py:meth:`get_reg` then there is an ambiguous task name (two :py:class:`Task` have the same name). This denotes an error.""" def __new__(metacls, classname, bases, classdict): """ Custom class creation for namespacing. Also register all subclasses. Set the task namespace to whatever the currently declared namespace is. """ if "task_namespace" not in classdict: classdict["task_namespace"] = metacls._default_namespace cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): """ Custom class instantiation utilizing instance cache. If a Task has already been instantiated with the same parameters, the previous instance is returned to reduce number of object instances. """ def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = Register.__instance_cache if h is None: # disabled return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() # unhashable types in parameters if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(cls): """ Clear/Reset the instance cache. """ Register.__instance_cache = {} @classmethod def disable_instance_cache(cls): """ Disables the instance cache. """ Register.__instance_cache = None @property def task_family(cls): """ The task family for the given class. 
If ``cls.task_namespace is None`` then it's the name of the class. Otherwise, ``<task_namespace>.`` is prefixed to the class name. """ if cls.task_namespace is None: return cls.__name__ else: return "%s.%s" % (cls.task_namespace, cls.__name__) @classmethod def get_reg(cls, include_config_without_section=False): """Return all of the registery classes. :return: a ``dict`` of task_family -> class """ # We have to do this on-demand in case task names have changed later reg = {} for cls in cls._reg: if cls.run == NotImplemented: continue if issubclass(cls, ConfigWithoutSection) and not include_config_without_section: continue name = cls.task_family if name in reg and reg[name] != cls and \ reg[name] != cls.AMBIGUOUS_CLASS and \ not issubclass(cls, reg[name]): # Registering two different classes - this means we can't instantiate them by name # The only exception is if one class is a subclass of the other. In that case, we # instantiate the most-derived class (this fixes some issues with decorator wrappers). reg[name] = cls.AMBIGUOUS_CLASS else: reg[name] = cls return reg @classmethod def tasks_str(cls): """ Human-readable register contents dump. """ return repr(sorted(Register.get_reg().keys())) @classmethod def get_task_cls(cls, name): """ Returns an unambiguous class or raises an exception. """ task_cls = Register.get_reg().get(name) if not task_cls: raise Exception('Task %r not found. Candidates are: %s' % (name, Register.tasks_str())) if task_cls == Register.AMBIGUOUS_CLASS: raise Exception('Task %r is ambiguous' % name) return task_cls @classmethod def get_all_params(cls): """ Compiles and returns all parameters for all :py:class:`Task`. :return: a ``dict`` of parameter name -> parameter. """ for task_name, task_cls in six.iteritems(cls.get_reg(include_config_without_section=True)): if task_cls == cls.AMBIGUOUS_CLASS: continue for param_name, param_obj in task_cls.get_params(): yield task_name, issubclass(task_cls, ConfigWithoutSection), param_name, param_obj @six.add_metaclass(Register) class Task(object): """ This is the base class of all Luigi Tasks, the base unit of work in Luigi. A Luigi Task describes a unit or work. The key methods of a Task, which must be implemented in a subclass are: * :py:meth:`run` - the computation done by this task. * :py:meth:`requires` - the list of Tasks that this Task depends on. * :py:meth:`output` - the output :py:class:`Target` that this Task creates. Parameters to the Task should be declared as members of the class, e.g.:: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() Each Task exposes a constructor accepting all :py:class:`Parameter` (and values) as kwargs. e.g. ``MyTask(count=10)`` would instantiate `MyTask`. In addition to any declared properties and methods, there are a few non-declared properties, which are created by the :py:class:`Register` metaclass: ``Task.task_namespace`` optional string which is prepended to the task name for the sake of scheduling. If it isn't overridden in a Task, whatever was last declared using `luigi.namespace` will be used. ``Task._parameters`` list of ``(parameter_name, parameter)`` tuples for this task class """ _event_callbacks = {} # Priority of the task: the scheduler should favor available # tasks with higher priority values first. priority = 0 disabled = False # Resources used by the task. Should be formatted like {"scp": 1} to indicate that the # task requires 1 unit of the scp resource. resources = {} # Number of seconds after which to time out the run function. 
No timeout if set to 0. Defaults # to 0 or value in client.cfg worker_timeout = None @classmethod def event_handler(cls, event): """ Decorator for adding event handlers. """ def wrapped(callback): cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback) return callback return wrapped def trigger_event(self, event, *args, **kwargs): """ Trigger that calls all of the specified events associated with this class. """ for event_class, event_callbacks in six.iteritems(self._event_callbacks): if not isinstance(self, event_class): continue for callback in event_callbacks.get(event, []): try: # callbacks are protected callback(*args, **kwargs) except KeyboardInterrupt: return except BaseException: logger.exception("Error in event callback for %r", event) @property def task_module(self): # Returns what Python module to import to get access to this class # TODO(erikbern): we should think about a language-agnostic mechanism return self.__class__.__module__ @property def task_family(self): """ Convenience method since a property on the metaclass isn't directly accessible through the class instances. """ return self.__class__.task_family @classmethod def get_params(cls): """ Returns all of the Parameters for this Task. """ # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically params = [] for param_name in dir(cls): param_obj = getattr(cls, param_name) if not isinstance(param_obj, Parameter): continue params.append((param_name, param_obj)) # The order the parameters are created matters. See Parameter class params.sort(key=lambda t: t[1].counter) return params @classmethod def get_param_values(cls, params, args, kwargs): """ Get the values of the parameters from the args and kwargs. :param params: list of (param_name, Parameter). :param args: positional arguments :param kwargs: keyword arguments. :returns: list of `(name, value)` tuples, one for each parameter. """ result = {} params_dict = dict(params) task_name = cls.task_family # In case any exceptions are thrown, create a helpful description of how the Task was invoked # TODO: should we detect non-reprable arguments? 
These will lead to mysterious errors exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs) # Fill in the positional arguments positional_params = [(n, p) for n, p in params if not p.is_global] for i, arg in enumerate(args): if i >= len(positional_params): raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args))) param_name, param_obj = positional_params[i] result[param_name] = arg # Then the optional arguments for param_name, arg in six.iteritems(kwargs): if param_name in result: raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name)) if param_name not in params_dict: raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name)) result[param_name] = arg # Then use the defaults for anything not filled in for param_name, param_obj in params: if param_name not in result: if not param_obj.has_task_value(task_name, param_name): raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name)) result[param_name] = param_obj.task_value(task_name, param_name) def list_to_tuple(x): """ Make tuples out of lists and sets to allow hashing """ if isinstance(x, list) or isinstance(x, set): return tuple(x) else: return x # Sort it by the correct order and make a list return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params] def __init__(self, *args, **kwargs): """ Constructor to resolve values for all Parameters. For example, the Task: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() can be instantiated as ``MyTask(count=10)``. """ params = self.get_params() param_values = self.get_param_values(params, args, kwargs) # Set all values on class instance for key, value in param_values: setattr(self, key, value) # Register args and kwargs as an attribute on the class. Might be useful self.param_args = tuple(value for key, value in param_values) self.param_kwargs = dict(param_values) # Build up task id task_id_parts = [] param_objs = dict(params) for param_name, param_value in param_values: if dict(params)[param_name].significant: task_id_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value))) self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts)) self.__hash = hash(self.task_id) def initialized(self): """ Returns ``True`` if the Task is initialized and ``False`` otherwise. """ return hasattr(self, 'task_id') @classmethod def from_str_params(cls, params_str=None): """ Creates an instance from a str->str hash. :param params_str: dict of param name -> value. """ if params_str is None: params_str = {} kwargs = {} for param_name, param in cls.get_params(): value = param.parse_from_input(param_name, params_str[param_name]) kwargs[param_name] = value return cls(**kwargs) def to_str_params(self): # Convert all parameters to a str->str hash params_str = {} params = dict(self.get_params()) for param_name, param_value in six.iteritems(self.param_kwargs): params_str[param_name] = params[param_name].serialize(param_value) return params_str def clone(self, cls=None, **kwargs): """ Creates a new instance from an existing instance where some of the args have changed. 
There's at least two scenarios where this is useful (see test/clone_test.py): * remove a lot of boiler plate when you have recursive dependencies and lots of args * there's task inheritance and some logic is on the base class :param cls: :param kwargs: :return: """ k = self.param_kwargs.copy() k.update(six.iteritems(kwargs)) if cls is None: cls = self.__class__ new_k = {} for param_name, param_class in cls.get_params(): if param_name in k: new_k[param_name] = k[param_name] return cls(**new_k) def __hash__(self): return self.__hash def __repr__(self): return self.task_id def __eq__(self, other): return self.__class__ == other.__class__ and self.param_args == other.param_args def complete(self): """ If the task has any outputs, return ``True`` if all outputs exists. Otherwise, return ``False``. However, you may freely override this method with custom logic. """ outputs = flatten(self.output()) if len(outputs) == 0: warnings.warn( "Task %r without outputs has no custom complete() method" % self, stacklevel=2 ) return False return all(map(lambda output: output.exists(), outputs)) @classmethod def bulk_complete(cls, parameter_tuples): """ Returns those of parameter_tuples for which this Task is complete. Override (with an efficient implementation) for efficient scheduling with range tools. Keep the logic consistent with that of complete(). """ raise NotImplementedError def output(self): """ The output that this Task produces. The output of the Task determines if the Task needs to be run--the task is considered finished iff the outputs all exist. Subclasses should override this method to return a single :py:class:`Target` or a list of :py:class:`Target` instances. Implementation note If running multiple workers, the output must be a resource that is accessible by all workers, such as a DFS or database. Otherwise, workers might compute the same output since they don't see the work done by other workers. """ return [] # default impl def requires(self): """ The Tasks that this Task depends on. A Task will only run if all of the Tasks that it requires are completed. If your Task does not require any other Tasks, then you don't need to override this method. Otherwise, a Subclasses can override this method to return a single Task, a list of Task instances, or a dict whose values are Task instances. """ return [] # default impl def _requires(self): """ Override in "template" tasks which themselves are supposed to be subclassed and thus have their requires() overridden (name preserved to provide consistent end-user experience), yet need to introduce (non-input) dependencies. Must return an iterable which among others contains the _requires() of the superclass. """ return flatten(self.requires()) # base impl def process_resources(self): """ Override in "template" tasks which provide common resource functionality but allow subclasses to specify additional resources while preserving the name for consistent end-user experience. """ return self.resources # default impl def input(self): """ Returns the outputs of the Tasks returned by :py:meth:`requires` :return: a list of :py:class:`Target` objects which are specified as outputs of all required Tasks. """ return getpaths(self.requires()) def deps(self): """ Internal method used by the scheduler. Returns the flattened list of requires. """ # used by scheduler return flatten(self._requires()) def run(self): """ The task run method, to be overridden in a subclass. """ pass # default impl def on_failure(self, exception): """ Override for custom error handling. 
This method gets called if an exception is raised in :py:meth:`run`. Return value of this method is json encoded and sent to the scheduler as the `expl` argument. Its string representation will be used as the body of the error email sent out if any. Default behavior is to return a string representation of the stack trace. """ traceback_string = traceback.format_exc() return "Runtime error:\n%s" % traceback_string def on_success(self): """ Override for doing custom completion handling for a larger class of tasks. This method gets called when :py:meth:`run` completes without raising any exceptions. The returned value is json encoded and sent to the scheduler as the `expl` argument. Default behavior is to send a None value""" pass class MixinNaiveBulkComplete(object): """ Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop. Applicable to tasks whose completeness checking is cheap. This doesn't exploit output location specific APIs for speed advantage, nevertheless removes redundant scheduler roundtrips. """ @classmethod def bulk_complete(cls, parameter_tuples): return [t for t in parameter_tuples if cls(t).complete()] def externalize(task): """ Returns an externalized version of the Task. See :py:class:`ExternalTask`. """ task.run = NotImplemented return task class ExternalTask(Task): """ Subclass for references to external dependencies. An ExternalTask does not have a `run` implementation, which signifies to the framework that this Task's :py:meth:`output` is generated outside of Luigi. """ run = NotImplemented class WrapperTask(Task): """ Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist. """ def complete(self): return all(r.complete() for r in flatten(self.requires())) class Config(Task): """Used for configuration that's not specific to a certain task TODO: let's refactor Task & Config so that it inherits from a common ParamContainer base class """ pass class ConfigWithoutSection(Task): """Used for configuration that doesn't have a particular section (e.g. --n-workers) """ pass def getpaths(struct): """ Maps all Tasks in a structured data object to their .output(). """ if isinstance(struct, Task): return struct.output() elif isinstance(struct, dict): r = {} for k, v in six.iteritems(struct): r[k] = getpaths(v) return r else: # Remaining case: assume r is iterable... try: s = list(struct) except TypeError: raise Exception('Cannot map %s to Task/dict/list' % str(struct)) return [getpaths(r) for r in s] def flatten(struct): """ Creates a flat list of all items in structured output (dicts, lists, items): .. code-block:: python >>> flatten({'a': 'foo', 'b': 'bar'}) ['foo', 'bar'] >>> flatten(['foo', ['bar', 'troll']]) ['foo', 'bar', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42] """ if struct is None: return [] flat = [] if isinstance(struct, dict): for _, result in six.iteritems(struct): flat += flatten(result) return flat if isinstance(struct, six.string_types): return [struct] try: # if iterable for result in struct: flat += flatten(result) return flat except TypeError: pass return [struct] def flatten_output(task): """ Lists all output targets by recursively walking output-less (wrapper) tasks. FIXME order consistently. """ r = flatten(task.output()) if not r: for dep in flatten(task.requires()): r += flatten_output(dep) return r
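The two copies of task.py above differ in get_param_values: one selects positional parameters with p.significant, the other with not p.is_global, which changes how positional arguments bind once global or insignificant parameters are involved. As a usage sketch of the surrounding machinery (the instance cache in Register.__call__, task_id construction in __init__, and clone()); the task and parameter names below are invented:

import datetime
import luigi


class Fetch(luigi.Task):
    date = luigi.DateParameter()
    source = luigi.Parameter(default='api')

    def run(self):
        pass


a = Fetch(date=datetime.date(2015, 1, 1))
b = Fetch(datetime.date(2015, 1, 1))   # same values, bound positionally
assert a is b                          # Register.__call__ served the cached instance
print(a.task_id)                       # Fetch(date=2015-01-01, source=api)
c = a.clone(source='backfill')         # copy with one parameter overridden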
BugsInPy/BugsInPy/temp/projects/luigi/bug-33-fixed/luigi/luigi/task.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-33-buggy/luigi/luigi/task.py
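The event machinery above (event_handler registering callbacks per class, trigger_event dispatching to every matching isinstance) can be hooked on the Task base class itself so one callback sees every task. A sketch, assuming the Event name constants that luigi of this era defines in its event module:

import luigi


# Registering on the base class means the isinstance() check in
# trigger_event() matches every task, so this fires for any failure.
@luigi.Task.event_handler(luigi.Event.FAILURE)
def mourn_failure(task, exception):
    print('Task %r failed: %s' % (task, exception))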
luigi-bug-17
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains the bindings for command line integration and dynamic loading of tasks If you don't want to run luigi from the command line. You may use the methods defined in this module to programatically run luigi. """ import logging import logging.config import os import sys import tempfile import signal import warnings from luigi import configuration from luigi import lock from luigi import parameter from luigi import rpc from luigi import scheduler from luigi import task from luigi import worker from luigi import execution_summary from luigi.cmdline_parser import CmdlineParser def setup_interface_logging(conf_file=None): # use a variable in the function object to determine if it has run before if getattr(setup_interface_logging, "has_run", False): return if conf_file is None: logger = logging.getLogger('luigi-interface') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s: %(message)s') stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) else: logging.config.fileConfig(conf_file, disable_existing_loggers=False) setup_interface_logging.has_run = True class core(task.Config): ''' Keeps track of a bunch of environment params. Uses the internal luigi parameter mechanism. The nice thing is that we can instantiate this class and get an object with all the environment variables set. This is arguably a bit of a hack. ''' use_cmdline_section = False local_scheduler = parameter.BoolParameter( default=False, description='Use an in-memory central scheduler. 
Useful for testing.', always_in_help=True) scheduler_host = parameter.Parameter( default='localhost', description='Hostname of machine running remote scheduler', config_path=dict(section='core', name='default-scheduler-host')) scheduler_port = parameter.IntParameter( default=8082, description='Port of remote scheduler api process', config_path=dict(section='core', name='default-scheduler-port')) scheduler_url = parameter.Parameter( default=None, description='Full path to remote scheduler', config_path=dict(section='core', name='default-scheduler-url'), ) lock_size = parameter.IntParameter( default=1, description="Maximum number of workers running the same command") no_lock = parameter.BoolParameter( default=False, description='Ignore if similar process is already running') lock_pid_dir = parameter.Parameter( default=os.path.join(tempfile.gettempdir(), 'luigi'), description='Directory to store the pid file') take_lock = parameter.BoolParameter( default=False, description='Signal other processes to stop getting work if already running') workers = parameter.IntParameter( default=1, description='Maximum number of parallel tasks to run') logging_conf_file = parameter.Parameter( default=None, description='Configuration file for logging') module = parameter.Parameter( default=None, description='Used for dynamic loading of modules', always_in_help=True) parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.') help = parameter.BoolParameter( default=False, description='Show most common flags and all task-specific flags', always_in_help=True) help_all = parameter.BoolParameter( default=False, description='Show all command line flags', always_in_help=True) class _WorkerSchedulerFactory(object): def create_local_scheduler(self): return scheduler.CentralPlannerScheduler(prune_on_get_work=True) def create_remote_scheduler(self, url): return rpc.RemoteScheduler(url) def create_worker(self, scheduler, worker_processes, assistant=False): return worker.Worker( scheduler=scheduler, worker_processes=worker_processes, assistant=assistant) def _schedule_and_run(tasks, worker_scheduler_factory=None, override_defaults=None): """ :param tasks: :param worker_scheduler_factory: :param override_defaults: :return: True if all tasks and their dependencies were successfully run (or already completed); False if any error occurred. """ if worker_scheduler_factory is None: worker_scheduler_factory = _WorkerSchedulerFactory() if override_defaults is None: override_defaults = {} env_params = core(**override_defaults) # search for logging configuration path first on the command line, then # in the application config file logging_conf = env_params.logging_conf_file if logging_conf is not None and not os.path.exists(logging_conf): raise Exception( "Error: Unable to locate specified logging configuration file!" 
) if not configuration.get_config().getboolean( 'core', 'no_configure_logging', False): setup_interface_logging(logging_conf) kill_signal = signal.SIGUSR1 if env_params.take_lock else None if (not env_params.no_lock and not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size, kill_signal))): raise PidLockAlreadyTakenExit() if env_params.local_scheduler: sch = worker_scheduler_factory.create_local_scheduler() else: if env_params.scheduler_url is not None: url = env_params.scheduler_url else: url = 'http://{host}:{port:d}/'.format( host=env_params.scheduler_host, port=env_params.scheduler_port, ) sch = worker_scheduler_factory.create_remote_scheduler(url=url) worker = worker_scheduler_factory.create_worker( scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant) success = True logger = logging.getLogger('luigi-interface') with worker: for t in tasks: success &= worker.add(t, env_params.parallel_scheduling) logger.info('Done scheduling tasks') if env_params.workers != 0: success &= worker.run() logger.info(execution_summary.summary(worker)) return dict(success=success, worker=worker) class PidLockAlreadyTakenExit(SystemExit): """ The exception thrown by :py:func:`luigi.run`, when the lock file is inaccessible """ pass def run(*args, **kwargs): return _run(*args, **kwargs)['success'] def _run(cmdline_args=None, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=None, local_scheduler=False): """ Please dont use. Instead use `luigi` binary. Run from cmdline using argparse. :param cmdline_args: :param main_task_cls: :param worker_scheduler_factory: :param use_dynamic_argparse: Deprecated and ignored :param local_scheduler: """ if use_dynamic_argparse is not None: warnings.warn("use_dynamic_argparse is deprecated, don't set it.", DeprecationWarning, stacklevel=2) if cmdline_args is None: cmdline_args = sys.argv[1:] if main_task_cls: cmdline_args.insert(0, main_task_cls.task_family) if local_scheduler: cmdline_args.insert(0, '--local-scheduler') with CmdlineParser.global_instance(cmdline_args) as cp: return _schedule_and_run([cp.get_task_obj()], worker_scheduler_factory) def build(tasks, worker_scheduler_factory=None, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: True if there were no scheduling errors, even if tasks may fail. """ if "no_lock" not in env_params: env_params["no_lock"] = True return _schedule_and_run(tasks, worker_scheduler_factory, override_defaults=env_params)['success'] # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """ This module contains the bindings for command line integration and dynamic loading of tasks If you don't want to run luigi from the command line. You may use the methods defined in this module to programatically run luigi. """ import logging import logging.config import os import sys import tempfile import signal import warnings from luigi import configuration from luigi import lock from luigi import parameter from luigi import rpc from luigi import scheduler from luigi import task from luigi import worker from luigi import execution_summary from luigi.cmdline_parser import CmdlineParser def setup_interface_logging(conf_file=None): # use a variable in the function object to determine if it has run before if getattr(setup_interface_logging, "has_run", False): return if conf_file is None: logger = logging.getLogger('luigi-interface') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s: %(message)s') stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) else: logging.config.fileConfig(conf_file, disable_existing_loggers=False) setup_interface_logging.has_run = True class core(task.Config): ''' Keeps track of a bunch of environment params. Uses the internal luigi parameter mechanism. The nice thing is that we can instantiate this class and get an object with all the environment variables set. This is arguably a bit of a hack. ''' use_cmdline_section = False local_scheduler = parameter.BoolParameter( default=False, description='Use an in-memory central scheduler. Useful for testing.', always_in_help=True) scheduler_host = parameter.Parameter( default='localhost', description='Hostname of machine running remote scheduler', config_path=dict(section='core', name='default-scheduler-host')) scheduler_port = parameter.IntParameter( default=8082, description='Port of remote scheduler api process', config_path=dict(section='core', name='default-scheduler-port')) scheduler_url = parameter.Parameter( default=None, description='Full path to remote scheduler', config_path=dict(section='core', name='default-scheduler-url'), ) lock_size = parameter.IntParameter( default=1, description="Maximum number of workers running the same command") no_lock = parameter.BoolParameter( default=False, description='Ignore if similar process is already running') lock_pid_dir = parameter.Parameter( default=os.path.join(tempfile.gettempdir(), 'luigi'), description='Directory to store the pid file') take_lock = parameter.BoolParameter( default=False, description='Signal other processes to stop getting work if already running') workers = parameter.IntParameter( default=1, description='Maximum number of parallel tasks to run') logging_conf_file = parameter.Parameter( default=None, description='Configuration file for logging') module = parameter.Parameter( default=None, description='Used for dynamic loading of modules', always_in_help=True) parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.') help = parameter.BoolParameter( default=False, description='Show most common flags and all task-specific flags', always_in_help=True) help_all = parameter.BoolParameter( default=False, description='Show all command line flags', always_in_help=True) class _WorkerSchedulerFactory(object): def create_local_scheduler(self): return 
scheduler.CentralPlannerScheduler(prune_on_get_work=True) def create_remote_scheduler(self, url): return rpc.RemoteScheduler(url) def create_worker(self, scheduler, worker_processes, assistant=False): return worker.Worker( scheduler=scheduler, worker_processes=worker_processes, assistant=assistant) def _schedule_and_run(tasks, worker_scheduler_factory=None, override_defaults=None): """ :param tasks: :param worker_scheduler_factory: :param override_defaults: :return: True if all tasks and their dependencies were successfully run (or already completed); False if any error occurred. """ if worker_scheduler_factory is None: worker_scheduler_factory = _WorkerSchedulerFactory() if override_defaults is None: override_defaults = {} env_params = core(**override_defaults) # search for logging configuration path first on the command line, then # in the application config file logging_conf = env_params.logging_conf_file if logging_conf is not None and not os.path.exists(logging_conf): raise Exception( "Error: Unable to locate specified logging configuration file!" ) if not configuration.get_config().getboolean( 'core', 'no_configure_logging', False): setup_interface_logging(logging_conf) kill_signal = signal.SIGUSR1 if env_params.take_lock else None if (not env_params.no_lock and not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size, kill_signal))): raise PidLockAlreadyTakenExit() if env_params.local_scheduler: sch = worker_scheduler_factory.create_local_scheduler() else: if env_params.scheduler_url is not None: url = env_params.scheduler_url else: url = 'http://{host}:{port:d}/'.format( host=env_params.scheduler_host, port=env_params.scheduler_port, ) sch = worker_scheduler_factory.create_remote_scheduler(url=url) worker = worker_scheduler_factory.create_worker( scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant) success = True logger = logging.getLogger('luigi-interface') with worker: for t in tasks: success &= worker.add(t, env_params.parallel_scheduling) logger.info('Done scheduling tasks') if env_params.workers != 0: success &= worker.run() logger.info(execution_summary.summary(worker)) return dict(success=success, worker=worker) class PidLockAlreadyTakenExit(SystemExit): """ The exception thrown by :py:func:`luigi.run`, when the lock file is inaccessible """ pass def run(*args, **kwargs): return _run(*args, **kwargs)['success'] def _run(cmdline_args=None, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=None, local_scheduler=False): """ Please dont use. Instead use `luigi` binary. Run from cmdline using argparse. :param cmdline_args: :param main_task_cls: :param worker_scheduler_factory: :param use_dynamic_argparse: Deprecated and ignored :param local_scheduler: """ if use_dynamic_argparse is not None: warnings.warn("use_dynamic_argparse is deprecated, don't set it.", DeprecationWarning, stacklevel=2) if cmdline_args is None: cmdline_args = sys.argv[1:] if main_task_cls: cmdline_args.insert(0, main_task_cls.task_family) if local_scheduler: cmdline_args.insert(0, '--local-scheduler') with CmdlineParser.global_instance(cmdline_args) as cp: return _schedule_and_run([cp.get_task_obj()], worker_scheduler_factory) def build(tasks, worker_scheduler_factory=None, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. 
code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: True if there were no scheduling errors, even if tasks may fail. """ if "no_lock" not in env_params: env_params["no_lock"] = True return _schedule_and_run(tasks, worker_scheduler_factory, override_defaults=env_params)['success']
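# --- Usage sketch (editor's addition, not part of luigi) -------------------
# A minimal, hedged example of driving the programmatic entry point above.
# `MyTask` and the target path are hypothetical, defined purely for
# illustration; only `build` from this module and the public luigi Task API
# are assumed. The function is never called here, so importing the module
# has no side effects.
def _example_build_usage():
    import luigi

    class MyTask(luigi.Task):
        n = luigi.IntParameter(default=42)

        def output(self):
            return luigi.LocalTarget('/tmp/my_task_%d.txt' % self.n)

        def run(self):
            with self.output().open('w') as f:
                f.write('done')

    # Roughly equivalent to `luigi MyTask --n 42 --local-scheduler` on the
    # command line; build() returns True when all tasks scheduled without
    # error. Note that build() defaults no_lock to True, as documented above.
    return build([MyTask(n=42)], local_scheduler=True)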
BugsInPy/BugsInPy/temp/projects/luigi/bug-17-fixed/luigi/luigi/interface.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-17-buggy/luigi/luigi/interface.py
luigi-bug-21
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains the bindings for command line integration and dynamic loading of tasks """ import argparse import logging import logging.config import os import sys import tempfile from luigi import configuration from luigi import lock from luigi import parameter from luigi import rpc from luigi import scheduler from luigi import task from luigi import worker from luigi.task_register import Register def setup_interface_logging(conf_file=None): # use a variable in the function object to determine if it has run before if getattr(setup_interface_logging, "has_run", False): return if conf_file is None: logger = logging.getLogger('luigi-interface') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s: %(message)s') stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) else: logging.config.fileConfig(conf_file, disable_existing_loggers=False) setup_interface_logging.has_run = True class core(task.Config): ''' Keeps track of a bunch of environment params. Uses the internal luigi parameter mechanism. The nice thing is that we can instantiate this class and get an object with all the environment variables set. This is arguably a bit of a hack. 
''' use_cmdline_section = False local_scheduler = parameter.BoolParameter( default=False, description='Use local scheduling') scheduler_host = parameter.Parameter( default='localhost', description='Hostname of machine running remote scheduler', config_path=dict(section='core', name='default-scheduler-host')) scheduler_port = parameter.IntParameter( default=8082, description='Port of remote scheduler api process', config_path=dict(section='core', name='default-scheduler-port')) lock_size = parameter.IntParameter( default=1, description="Maximum number of workers running the same command") no_lock = parameter.BoolParameter( default=False, description='Ignore if similar process is already running') lock_pid_dir = parameter.Parameter( default=os.path.join(tempfile.gettempdir(), 'luigi'), description='Directory to store the pid file') workers = parameter.IntParameter( default=1, description='Maximum number of parallel tasks to run') logging_conf_file = parameter.Parameter( default=None, description='Configuration file for logging') module = parameter.Parameter( default=None, description='Used for dynamic loading of modules') # see DynamicArgParseInterface parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.') class WorkerSchedulerFactory(object): def create_local_scheduler(self): return scheduler.CentralPlannerScheduler(prune_on_get_work=True) def create_remote_scheduler(self, host, port): return rpc.RemoteScheduler(host=host, port=port) def create_worker(self, scheduler, worker_processes, assistant=False): return worker.Worker( scheduler=scheduler, worker_processes=worker_processes, assistant=assistant) class Interface(object): def parse(self): raise NotImplementedError @staticmethod def run(tasks, worker_scheduler_factory=None, override_defaults=None): """ :param tasks: :param worker_scheduler_factory: :param override_defaults: :return: True if all tasks and their dependencies were successfully run (or already completed); False if any error occurred. """ if worker_scheduler_factory is None: worker_scheduler_factory = WorkerSchedulerFactory() if override_defaults is None: override_defaults = {} env_params = core(**override_defaults) # search for logging configuration path first on the command line, then # in the application config file logging_conf = env_params.logging_conf_file if logging_conf is not None and not os.path.exists(logging_conf): raise Exception( "Error: Unable to locate specified logging configuration file!" 
) if not configuration.get_config().getboolean( 'core', 'no_configure_logging', False): setup_interface_logging(logging_conf) if (not env_params.no_lock and not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))): sys.exit(1) if env_params.local_scheduler: sch = worker_scheduler_factory.create_local_scheduler() else: sch = worker_scheduler_factory.create_remote_scheduler( host=env_params.scheduler_host, port=env_params.scheduler_port) w = worker_scheduler_factory.create_worker( scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant) success = True for t in tasks: success &= w.add(t, env_params.parallel_scheduling) logger = logging.getLogger('luigi-interface') logger.info('Done scheduling tasks') if env_params.workers != 0: success &= w.run() w.stop() return success # Simple unweighted Levenshtein distance def _editdistance(a, b): r0 = range(0, len(b) + 1) r1 = [0] * (len(b) + 1) for i in range(0, len(a)): r1[0] = i + 1 for j in range(0, len(b)): c = 0 if a[i] is b[j] else 1 r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c) r0 = r1[:] return r1[len(b)] def error_task_names(task_name, task_names): weighted_tasks = [(_editdistance(task_name, task_name_2), task_name_2) for task_name_2 in task_names] ordered_tasks = sorted(weighted_tasks, key=lambda pair: pair[0]) candidates = [task for (dist, task) in ordered_tasks if dist <= 5 and dist < len(task)] display_string = "" if candidates: display_string = "No task %s. Did you mean:\n%s" % (task_name, '\n'.join(candidates)) else: display_string = "No task %s." % task_name raise SystemExit(display_string) def add_task_parameters(parser, task_cls): for param_name, param in task_cls.get_params(): param.add_to_cmdline_parser(parser, param_name, task_cls.task_family, glob=False) def get_global_parameters(): seen_params = set() for task_name, is_without_section, param_name, param in Register.get_all_params(): if param in seen_params: continue seen_params.add(param) yield task_name, is_without_section, param_name, param def add_global_parameters(parser): for task_name, is_without_section, param_name, param in get_global_parameters(): param.add_to_cmdline_parser(parser, param_name, task_name, glob=True, is_without_section=is_without_section) def get_task_parameters(task_cls, args): # Parse a str->str dict to the correct types params = {} for param_name, param in task_cls.get_params(): param.parse_from_args(param_name, task_cls.task_family, args, params) return params def set_global_parameters(args): # Note that this is not side effect free for task_name, is_without_section, param_name, param in get_global_parameters(): param.set_global_from_args(param_name, task_name, args, is_without_section=is_without_section) class ArgParseInterface(Interface): """ Takes the task as the command, with parameters specific to it. """ def parse_task(self, cmdline_args=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) task_names = Register.task_names() # Parse global arguments and pull out the task name. # We used to do this using subparsers+command, but some issues with # argparse across different versions of Python (2.7.9) made it hard. 
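# Editor's illustration (comments only, not executed): assuming a command
# line like ['--workers', '2', 'MyTask', '--n', '42'] where '--n' is not a
# global parameter, the parse_known_args call below puts the recognized
# global flags into `args` and leaves ['MyTask', '--n', '42'] in `unknown`,
# so unknown[0] is taken as the task name. '--help' is filtered out first so
# that help is rendered later by the task-specific subparser instead.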
args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) if len(unknown) == 0: # In case it included a --help argument, run again parser.parse_known_args(args=cmdline_args) raise SystemExit('No task specified') task_name = unknown[0] if task_name not in task_names: error_task_names(task_name, task_names) task_cls = Register.get_task_cls(task_name) # Add a subparser to parse task-specific arguments subparsers = parser.add_subparsers(dest='command') subparser = subparsers.add_parser(task_name) # Add both task and global params here so that we can support both: # test.py --global-param xyz Test --n 42 # test.py Test --n 42 --global-param xyz add_global_parameters(subparser) add_task_parameters(subparser, task_cls) # Workaround for bug in argparse for Python 2.7.9 # See https://mail.python.org/pipermail/python-dev/2015-January/137699.html subargs = parser.parse_args(args=cmdline_args) for key, value in vars(subargs).items(): if value: # Either True (for boolean args) or non-None (everything else) setattr(args, key, value) # Notice that this is not side effect free because it might set global params set_global_parameters(args) task_params = get_task_parameters(task_cls, args) return [task_cls(**task_params)] def parse(self, cmdline_args=None): return self.parse_task(cmdline_args) class DynamicArgParseInterface(ArgParseInterface): """ Uses --module as a way to load modules dynamically Usage: .. code-block:: console python whatever.py --module foo_module FooTask --blah xyz --x 123 This will dynamically import foo_module and then try to create FooTask from this. """ def parse(self, cmdline_args=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) module = args.module __import__(module) return self.parse_task(cmdline_args) def run(cmdline_args=None, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=False, local_scheduler=False): """ Please dont use. Instead use `luigi` binary. Run from cmdline using argparse. :param cmdline_args: :param main_task_cls: :param worker_scheduler_factory: :param use_dynamic_argparse: :param local_scheduler: """ if use_dynamic_argparse: interface = DynamicArgParseInterface() else: interface = ArgParseInterface() if main_task_cls: cmdline_args.insert(0, main_task_cls.task_family) if local_scheduler: cmdline_args.insert(0, '--local-scheduler') tasks = interface.parse(cmdline_args) return interface.run(tasks, worker_scheduler_factory) def build(tasks, worker_scheduler_factory=None, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: """ if "no_lock" not in env_params: env_params["no_lock"] = True Interface.run(tasks, worker_scheduler_factory, override_defaults=env_params) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains the bindings for command line integration and dynamic loading of tasks """ import argparse import logging import logging.config import os import sys import tempfile from luigi import configuration from luigi import lock from luigi import parameter from luigi import rpc from luigi import scheduler from luigi import task from luigi import worker from luigi.task_register import Register def setup_interface_logging(conf_file=None): # use a variable in the function object to determine if it has run before if getattr(setup_interface_logging, "has_run", False): return if conf_file is None: logger = logging.getLogger('luigi-interface') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s: %(message)s') stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) else: logging.config.fileConfig(conf_file, disable_existing_loggers=False) setup_interface_logging.has_run = True class core(task.Config): ''' Keeps track of a bunch of environment params. Uses the internal luigi parameter mechanism. The nice thing is that we can instantiate this class and get an object with all the environment variables set. This is arguably a bit of a hack. ''' use_cmdline_section = False local_scheduler = parameter.BoolParameter( default=False, description='Use local scheduling') scheduler_host = parameter.Parameter( default='localhost', description='Hostname of machine running remote scheduler', config_path=dict(section='core', name='default-scheduler-host')) scheduler_port = parameter.IntParameter( default=8082, description='Port of remote scheduler api process', config_path=dict(section='core', name='default-scheduler-port')) lock_size = parameter.IntParameter( default=1, description="Maximum number of workers running the same command") no_lock = parameter.BoolParameter( default=False, description='Ignore if similar process is already running') lock_pid_dir = parameter.Parameter( default=os.path.join(tempfile.gettempdir(), 'luigi'), description='Directory to store the pid file') workers = parameter.IntParameter( default=1, description='Maximum number of parallel tasks to run') logging_conf_file = parameter.Parameter( default=None, description='Configuration file for logging') module = parameter.Parameter( default=None, description='Used for dynamic loading of modules') # see DynamicArgParseInterface parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.') class WorkerSchedulerFactory(object): def create_local_scheduler(self): return scheduler.CentralPlannerScheduler(prune_on_get_work=True) def create_remote_scheduler(self, host, port): return rpc.RemoteScheduler(host=host, port=port) def create_worker(self, scheduler, worker_processes, assistant=False): return worker.Worker( scheduler=scheduler, worker_processes=worker_processes, assistant=assistant) class Interface(object): def parse(self): raise 
NotImplementedError @staticmethod def run(tasks, worker_scheduler_factory=None, override_defaults=None): """ :param tasks: :param worker_scheduler_factory: :param override_defaults: :return: True if all tasks and their dependencies were successfully run (or already completed); False if any error occurred. """ if worker_scheduler_factory is None: worker_scheduler_factory = WorkerSchedulerFactory() if override_defaults is None: override_defaults = {} env_params = core(**override_defaults) # search for logging configuration path first on the command line, then # in the application config file logging_conf = env_params.logging_conf_file if logging_conf is not None and not os.path.exists(logging_conf): raise Exception( "Error: Unable to locate specified logging configuration file!" ) if not configuration.get_config().getboolean( 'core', 'no_configure_logging', False): setup_interface_logging(logging_conf) if (not env_params.no_lock and not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))): sys.exit(1) if env_params.local_scheduler: sch = worker_scheduler_factory.create_local_scheduler() else: sch = worker_scheduler_factory.create_remote_scheduler( host=env_params.scheduler_host, port=env_params.scheduler_port) w = worker_scheduler_factory.create_worker( scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant) success = True for t in tasks: success &= w.add(t, env_params.parallel_scheduling) logger = logging.getLogger('luigi-interface') logger.info('Done scheduling tasks') if env_params.workers != 0: success &= w.run() w.stop() return success # Simple unweighted Levenshtein distance def _editdistance(a, b): r0 = range(0, len(b) + 1) r1 = [0] * (len(b) + 1) for i in range(0, len(a)): r1[0] = i + 1 for j in range(0, len(b)): c = 0 if a[i] is b[j] else 1 r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c) r0 = r1[:] return r1[len(b)] def error_task_names(task_name, task_names): weighted_tasks = [(_editdistance(task_name, task_name_2), task_name_2) for task_name_2 in task_names] ordered_tasks = sorted(weighted_tasks, key=lambda pair: pair[0]) candidates = [task for (dist, task) in ordered_tasks if dist <= 5 and dist < len(task)] display_string = "" if candidates: display_string = "No task %s. Did you mean:\n%s" % (task_name, '\n'.join(candidates)) else: display_string = "No task %s." 
% task_name raise SystemExit(display_string) def add_task_parameters(parser, task_cls): for param_name, param in task_cls.get_params(): param.add_to_cmdline_parser(parser, param_name, task_cls.task_family, glob=False) def get_global_parameters(): seen_params = set() for task_name, is_without_section, param_name, param in Register.get_all_params(): if param in seen_params: continue seen_params.add(param) yield task_name, is_without_section, param_name, param def add_global_parameters(parser): for task_name, is_without_section, param_name, param in get_global_parameters(): param.add_to_cmdline_parser(parser, param_name, task_name, glob=True, is_without_section=is_without_section) def get_task_parameters(task_cls, args): # Parse a str->str dict to the correct types params = {} for param_name, param in task_cls.get_params(): param.parse_from_args(param_name, task_cls.task_family, args, params) return params def set_global_parameters(args): # Note that this is not side effect free for task_name, is_without_section, param_name, param in get_global_parameters(): param.set_global_from_args(param_name, task_name, args, is_without_section=is_without_section) class ArgParseInterface(Interface): """ Takes the task as the command, with parameters specific to it. """ def parse_task(self, cmdline_args=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) task_names = Register.task_names() # Parse global arguments and pull out the task name. # We used to do this using subparsers+command, but some issues with # argparse across different versions of Python (2.7.9) made it hard. args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) if len(unknown) == 0: # In case it included a --help argument, run again parser.parse_known_args(args=cmdline_args) raise SystemExit('No task specified') task_name = unknown[0] if task_name not in task_names: error_task_names(task_name, task_names) task_cls = Register.get_task_cls(task_name) # Add a subparser to parse task-specific arguments subparsers = parser.add_subparsers(dest='command') subparser = subparsers.add_parser(task_name) # Add both task and global params here so that we can support both: # test.py --global-param xyz Test --n 42 # test.py Test --n 42 --global-param xyz add_global_parameters(subparser) add_task_parameters(subparser, task_cls) # Workaround for bug in argparse for Python 2.7.9 # See https://mail.python.org/pipermail/python-dev/2015-January/137699.html subargs = parser.parse_args(args=cmdline_args) for key, value in vars(subargs).items(): if value: # Either True (for boolean args) or non-None (everything else) setattr(args, key, value) # Notice that this is not side effect free because it might set global params set_global_parameters(args) task_params = get_task_parameters(task_cls, args) return [task_cls(**task_params)] def parse(self, cmdline_args=None): return self.parse_task(cmdline_args) class DynamicArgParseInterface(ArgParseInterface): """ Uses --module as a way to load modules dynamically Usage: .. code-block:: console python whatever.py --module foo_module FooTask --blah xyz --x 123 This will dynamically import foo_module and then try to create FooTask from this. 
""" def parse(self, cmdline_args=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) module = args.module __import__(module) return self.parse_task(cmdline_args) def run(cmdline_args=None, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=False, local_scheduler=False): """ Please dont use. Instead use `luigi` binary. Run from cmdline using argparse. :param cmdline_args: :param main_task_cls: :param worker_scheduler_factory: :param use_dynamic_argparse: :param local_scheduler: """ if use_dynamic_argparse: interface = DynamicArgParseInterface() else: interface = ArgParseInterface() if main_task_cls: cmdline_args.insert(0, main_task_cls.task_family) if local_scheduler: cmdline_args.insert(0, '--local-scheduler') tasks = interface.parse(cmdline_args) return interface.run(tasks, worker_scheduler_factory) def build(tasks, worker_scheduler_factory=None, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: """ if "no_lock" not in env_params: env_params["no_lock"] = True Interface.run(tasks, worker_scheduler_factory, override_defaults=env_params)
BugsInPy/BugsInPy/temp/projects/luigi/bug-21-fixed/luigi/luigi/interface.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-21-buggy/luigi/luigi/interface.py
luigi-bug-19
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """ add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def fix_time(x): # Backwards compatibility for a fix in Dec 2014.
Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ Whether this worker is not an assistant and has only tasks without resource requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistence. The point of this class is to enable other ways to keep state, e.g. by using a database. These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker or Task class, this # code needs to be updated # Compatibility since 2014-06-02 for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) # Compatibility since 2015-05-28 if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks.
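# Editor's illustration (comments only, not executed): the loop below
# rebuilds Worker.tasks from the task side of the relation. For example, if
# a task with task.workers == {'w1', 'w2'} was unpickled, both
# self._active_workers['w1'].tasks and self._active_workers['w2'].tasks end
# up containing that task object, restoring the invariant that the
# worker->tasks and task->workers mappings mirror each other.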
for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) # Compatibility since 2015-04-28 if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)): for t in six.itervalues(self._tasks): t.disable_hard_timeout = None else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). """ return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None: return if new_status == FAILED and task.can_disable(): task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def prune(self, task, config): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and 
task.scheduler_disable_time is not None: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
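# Editor's note (comments only): functools.partial below pre-binds the three
# disable_* settings taken from config, so a later call such as
# self._make_task(task_id='A', status=PENDING, deps=None) behaves like
# Task(task_id='A', status=PENDING, deps=None,
#      disable_failures=self._config.disable_failures,
#      disable_hard_timeout=self._config.disable_hard_timeout,
#      disable_window=self._config.disable_window)
# without repeating the config plumbing at every call site.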
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) if task.id not in necessary_tasks and self._state.prune(task, self._config): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
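# Editor's worked example (comments only): suppose available resources are
# {'db': 1} and one RUNNING task holds {'db': 1}. Then _used_resources()
# yields used_resources['db'] == 1, so for another task needing {'db': 1},
# _has_resources checks 1 + 1 > 1 and returns False: the task is not handed
# out. greedy_resources additionally charges resources that higher-priority
# pending tasks would claim if all workers were rescheduled greedily, which
# is what implements the anti-starvation rule described above.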
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if 
self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """ add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatibility.
We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistence. The point of this class is to enable other ways to keep state, e.g. by using a database. These will be implemented by creating an abstract base class that this and other classes inherit from.
""" def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker or Task class, this # code needs to be updated # Compatibility since 2014-06-02 for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) # Compatibility since 2015-05-28 if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) # Compatibility since 2015-04-28 if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)): for t in six.itervalues(self._tasks): t.disable_hard_timeout = None else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def prune(self, task, config): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. 
That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
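# Editorial sketch (an addition, not part of the original file): the `resources`
# dict read above is a plain mapping of resource name -> integer capacity.
# For example, a hypothetical config section
#     [resources]
#     db = 2
#     gpu = 1
# would be parsed by configuration.get_config().getintdict('resources') into
# {'db': 2, 'gpu': 1}, and _has_resources() later checks candidate tasks
# against these capacities.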
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) if task.id not in necessary_tasks and self._state.prune(task, self._config): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was an actual change, to prevent noise.
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status would lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node with no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources.
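# Editorial worked example of the double check above (an addition, with
# hypothetical numbers): suppose self._resources == {'db': 1}, one RUNNING
# task holds {'db': 1}, and a PENDING task for this worker needs {'db': 1}.
# Then _has_resources({'db': 1}, used_resources) is False, so the task is not
# handed out now; but _has_resources({'db': 1}, greedy_resources) can still be
# True, in which case the task is greedily "reserved" against a worker in the
# loop below instead of being skipped, so lower-priority tasks cannot claim
# the resource first.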
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if 
self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
BugsInPy/BugsInPy/temp/projects/luigi/bug-19-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-19-buggy/luigi/luigi/scheduler.py
luigi-bug-10
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections import inspect try: import cPickle as pickle except ImportError: import pickle import functools import hashlib import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \ BATCH_RUNNING from luigi.task import Config logger = logging.getLogger(__name__) UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, BATCH_RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') RPC_METHODS = {} _retry_policy_fields = [ "retry_count", "disable_hard_timeout", "disable_window", ] RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields) def _get_empty_retry_policy(): return RetryPolicy(*[None] * len(_retry_policy_fields)) def rpc_method(**request_args): def _rpc_method(fn): # If request args are passed, return this function again for use as # the decorator function with the request args attached. fn_args = inspect.getargspec(fn) assert not fn_args.varargs assert fn_args.args[0] == 'self' all_args = fn_args.args[1:] defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ()))) required_args = frozenset(arg for arg in all_args if arg not in defaults) fn_name = fn.__name__ @functools.wraps(fn) def rpc_func(self, *args, **kwargs): actual_args = defaults.copy() actual_args.update(dict(zip(all_args, args))) actual_args.update(kwargs) if not all(arg in actual_args for arg in required_args): raise TypeError('{} takes {} arguments ({} given)'.format( fn_name, len(all_args), len(actual_args))) return self._request('/api/{}'.format(fn_name), actual_args, **request_args) RPC_METHODS[fn_name] = rpc_func return fn return _rpc_method class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatibility.
We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than retry_count failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) retry_count = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable_failures')) disable_hard_timeout = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def _get_retry_policy(self): return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, tracking_url=None, status_message=None, retry_policy='notoptional'): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.retry_policy = retry_policy self.failures = Failures(self.retry_policy.disable_window) self.tracking_url = tracking_url self.status_message = status_message self.scheduler_disable_time = None self.runnable = False self.batchable = False self.batch_id = None def __repr__(self): return "Task(%r)" % vars(self) # TODO(2017-08-10) replace this function with direct calls to batchable # this only exists for backward compatibility def is_batchable(self): try: return self.batchable except AttributeError: return False def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if self.failures.first_failure_time is not None: if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout): return True logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count) if self.failures.num_failures() >= self.retry_policy.retry_count: logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count) return True return False @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items()) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. 
""" if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object self._task_batchers = {} def get_state(self): return self._tasks, self._active_workers, self._task_batchers def set_state(self, state): self._tasks, self._active_workers = state[:2] if len(state) >= 3: self._task_batchers = state[2] def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from empty state.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with empty state", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_batch_running_tasks(self, batch_id): assert batch_id is not None return [ task for task in self.get_active_tasks(BATCH_RUNNING) if task.batch_id == batch_id ] def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def set_batcher(self, worker_id, family, batcher_args, max_batch_size): self._task_batchers.setdefault(worker_id, {}) self._task_batchers[worker_id][family] = (batcher_args, max_batch_size) def get_batcher(self, worker_id, family): return self._task_batchers.get(worker_id, {}).get(family, (None, 1)) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_batch_running(self, task, batch_id, worker_id): self.set_status(task, BATCH_RUNNING) task.batch_id = batch_id task.worker_running = worker_id def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING): return remove_on_failure = task.batch_id is not None and not task.batchable if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if task.status == RUNNING and task.batch_id is not None: for batch_task in self.get_batch_running_tasks(task.batch_id): self.set_status(batch_task, new_status, config) batch_task.batch_id = None task.batch_id = None if new_status == FAILED and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=task.retry_policy.retry_count, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() if new_status == FAILED: task.retry = time.time() + config.retry_delay if remove_on_failure: task.remove = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING): # We don't check for the RUNNING case, because that is already handled # by the fail_dead_worker_task function. 
logger.debug("Task %r has no stakeholders anymore -> might remove " "task in %s seconds", task.id, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() >= task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers.difference_update(workers) def disable_workers(self, workers): self._remove_workers_from_tasks(workers, remove_stakeholders=False) for worker in workers: self.get_worker(worker).disabled = True class Scheduler(object): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy()) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() @rpc_method() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task): logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return not getattr(worker, 'disabled', False) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) @rpc_method() def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')): self._state.set_batcher(worker, task_family, batched_args, max_batch_size) @rpc_method() def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, worker=None, batchable=None, batch_id=None, retry_policy_dict={}, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ assert worker is not None worker_id = worker worker_enabled = self.update(worker_id) retry_policy = self._generate_retry_policy(retry_policy_dict) if worker_enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker_enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if batch_id is not None: task.batch_id = batch_id if status == RUNNING and not task.worker_running: task.worker_running = worker_id if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.tracking_url = tracking_url if batchable is not None: 
                task.batchable = batchable
        if task.remove is not None:
            task.remove = None  # unmark task for removal so it isn't removed after being added

        if expl is not None:
            task.expl = expl
            if task.batch_id is not None:
                for batch_task in self._state.get_batch_running_tasks(task.batch_id):
                    batch_task.expl = expl

        if not (task.status in (RUNNING, BATCH_RUNNING) and status == PENDING) or new_deps:
            # don't allow re-scheduling of task while it is running, it must either fail or succeed first
            if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)

        if deps is not None:
            task.deps = set(deps)

        if new_deps is not None:
            task.deps.update(new_deps)

        if resources is not None:
            task.resources = resources

        if worker_enabled and not assistant:
            task.stakeholders.add(worker_id)

            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN,
                                                                         deps=None, priority=priority))
                t.stakeholders.add(worker_id)

        self._update_priority(task, priority, worker_id)

        # Because some tasks (non-dynamic dependencies) are `_make_task`ed
        # before we know their retry_policy, we always set it here
        task.retry_policy = retry_policy

        if runnable and status != FAILED and worker_enabled:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable

    @rpc_method()
    def add_worker(self, worker, info, **kwargs):
        self._state.get_worker(worker).add_info(info)

    @rpc_method()
    def disable_worker(self, worker):
        self._state.disable_workers({worker})

    @rpc_method()
    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)

    def _generate_retry_policy(self, task_retry_policy_dict):
        retry_policy_dict = self._config._get_retry_policy()._asdict()
        retry_policy_dict.update({k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None})
        return RetryPolicy(**retry_policy_dict)

    def _has_resources(self, needed_resources, used_resources):
        if needed_resources is None:
            return True

        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] > available_resources.get(resource, 1):
                return False
        return True

    def _used_resources(self):
        used_resources = collections.defaultdict(int)
        if self._resources is not None:
            for task in self._state.get_active_tasks(status=RUNNING):
                if task.resources:
                    for resource, amount in six.iteritems(task.resources):
                        used_resources[resource] += amount
        return used_resources

    def _rank(self, task):
        """
        Return worker's rank function for task scheduling.
        :return:
        """
        return task.priority, -task.time

    def _schedulable(self, task):
        if task.status != PENDING:
            return False
        for dep in task.deps:
            dep_task = self._state.get_task(dep, default=None)
            if dep_task is None or dep_task.status != DONE:
                return False
        return True

    def _reset_orphaned_batch_running_tasks(self, worker_id):
        running_batch_ids = {
            task.batch_id
            for task in self._state.get_running_tasks()
            if task.worker_running == worker_id
        }
        orphaned_tasks = [
            task for task in self._state.get_active_tasks(BATCH_RUNNING)
            if task.worker_running == worker_id and task.batch_id not in running_batch_ids
        ]
        for task in orphaned_tasks:
            self._state.set_status(task, PENDING)

    @rpc_method(allow_null=False)
    def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs):
        # TODO: remove any expired nodes

        # Algo: iterate over all nodes, find the highest priority node with no dependencies
        # and available resources.

        # Resource checking looks both at currently available resources and at which resources would
        # be available if all running tasks died and we rescheduled all workers greedily. We do both
        # checks in order to prevent a worker with many low-priority tasks from starving other
        # workers with higher priority tasks that share the same resources.

        # TODO: remove tasks that can't be done, figure out if the worker has absolutely
        # nothing it can wait for

        if self._config.prune_on_get_work:
            self.prune()

        assert worker is not None
        worker_id = worker
        # Return remaining tasks that have no FAILED descendants
        self.update(worker_id, {'host': host}, get_work=True)
        if assistant:
            self.add_worker(worker_id, [('assistant', assistant)])

        batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1
        best_task = None
        if current_tasks is not None:
            ct_set = set(current_tasks)
            for task in sorted(self._state.get_running_tasks(), key=self._rank):
                if task.worker_running == worker_id and task.id not in ct_set:
                    best_task = task

        if current_tasks is not None:
            # batch running tasks that weren't claimed since the last get_work go back in the pool
            self._reset_orphaned_batch_running_tasks(worker_id)

        locally_pending_tasks = 0
        running_tasks = []
        upstream_table = {}

        greedy_resources = collections.defaultdict(int)
        n_unique_pending = 0

        worker = self._state.get_worker(worker_id)
        if worker.is_trivial_worker(self._state):
            relevant_tasks = worker.get_pending_tasks(self._state)
            used_resources = collections.defaultdict(int)
            greedy_workers = dict()  # If there are no resources, then they can grab any task
        else:
            relevant_tasks = self._state.get_pending_tasks()
            used_resources = self._used_resources()
            activity_limit = time.time() - self._config.worker_disconnect_delay
            active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
            greedy_workers = dict((worker.id, worker.info.get('workers', 1))
                                  for worker in active_workers)
        tasks = list(relevant_tasks)
        tasks.sort(key=self._rank, reverse=True)

        for task in tasks:
            in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
            if task.status == RUNNING and in_workers:
                # Return a list of currently running tasks to the client,
                # makes it easier to troubleshoot
                other_worker = self._state.get_worker(task.worker_running)
                more_info = {'task_id': task.id, 'worker': str(other_worker)}
                if other_worker is not None:
                    more_info.update(other_worker.info)
                    running_tasks.append(more_info)

            if task.status == PENDING and in_workers:
                upstream_status = self._upstream_status(task.id, upstream_table)
                if upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if (best_task and batched_params and task.family == best_task.family and len(batched_tasks) < max_batch_size and task.is_batchable() and all( task.params.get(name) == value for name, value in unbatched_params.items()) and self._schedulable(task)): for name, params in batched_params.items(): params.append(task.params.get(name)) batched_tasks.append(task) if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task batch_param_names, max_batch_size = self._state.get_batcher( worker_id, task.family) if batch_param_names and task.is_batchable(): try: batched_params = { name: [task.params[name]] for name in batch_param_names } unbatched_params = { name: value for name, value in task.params.items() if name not in batched_params } batched_tasks.append(task) except KeyError: batched_params, unbatched_params = None, None else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if len(batched_tasks) > 1: batch_string = '|'.join(task.id for task in batched_tasks) batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest() for task in batched_tasks: self._state.set_batch_running(task, batch_id, worker_id) combined_params = best_task.params.copy() combined_params.update(batched_params) reply['task_id'] = None reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = combined_params reply['batch_id'] = batch_id reply['batch_task_ids'] = [task.id for task in batched_tasks] elif best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply @rpc_method(attempts=1) def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status 
based on the previously calculated child elements upstream_severities = list(upstream_status_table.get(a_task_id) for a_task_id in dep.deps if a_task_id in upstream_status_table) or [''] status = min(upstream_severities, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': getattr(task, "status_message", None) } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret @rpc_method() def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
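                # TASK_FAMILY_RE is r'([^(_]+)[(_]': it captures everything before the
                # first '(' or '_', so an id such as 'MyTask(param=1)' still yields the
                # family 'MyTask' for display even though the task itself is unknown.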
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized @rpc_method() def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) @rpc_method() def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) @rpc_method() def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks): return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id @rpc_method() def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers @rpc_method() def resource_list(self): """ Resources usage info and their consumers (tasks). 
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret @rpc_method() def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result @rpc_method() def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized @rpc_method() def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} @rpc_method() def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message if task.status == RUNNING and task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.status_message = status_message @rpc_method() def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""

import collections
import inspect

try:
    import cPickle as pickle
except ImportError:
    import pickle
import functools
import hashlib
import itertools
import logging
import os
import re
import time

from luigi import six

from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \
    BATCH_RUNNING
from luigi.task import Config

logger = logging.getLogger(__name__)

UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'

UPSTREAM_SEVERITY_ORDER = (
    '',
    UPSTREAM_RUNNING,
    UPSTREAM_MISSING_INPUT,
    UPSTREAM_FAILED,
    UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
    FAILED: UPSTREAM_FAILED,
    RUNNING: UPSTREAM_RUNNING,
    BATCH_RUNNING: UPSTREAM_RUNNING,
    PENDING: UPSTREAM_MISSING_INPUT,
    DISABLED: UPSTREAM_DISABLED,
}
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')
RPC_METHODS = {}

_retry_policy_fields = [
    "retry_count",
    "disable_hard_timeout",
    "disable_window",
]
RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields)


def _get_empty_retry_policy():
    return RetryPolicy(*[None] * len(_retry_policy_fields))


def rpc_method(**request_args):
    def _rpc_method(fn):
        # If request args are passed, return this function again for use as
        # the decorator function with the request args attached.
        fn_args = inspect.getargspec(fn)

        assert not fn_args.varargs
        assert fn_args.args[0] == 'self'
        all_args = fn_args.args[1:]
        defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ())))
        required_args = frozenset(arg for arg in all_args if arg not in defaults)
        fn_name = fn.__name__

        @functools.wraps(fn)
        def rpc_func(self, *args, **kwargs):
            actual_args = defaults.copy()
            actual_args.update(dict(zip(all_args, args)))
            actual_args.update(kwargs)
            if not all(arg in actual_args for arg in required_args):
                raise TypeError('{} takes {} arguments ({} given)'.format(
                    fn_name, len(all_args), len(actual_args)))

            return self._request('/api/{}'.format(fn_name), actual_args, **request_args)

        RPC_METHODS[fn_name] = rpc_func
        return fn

    return _rpc_method


class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We
    # should drop the compatibility at some point

    retry_delay = parameter.FloatParameter(default=900.0)
    remove_delay = parameter.FloatParameter(default=600.0)
    worker_disconnect_delay = parameter.FloatParameter(default=60.0)
    state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')

    # Jobs are disabled if we see more than retry_count failures in disable_window seconds.
    # These disables last for disable_persist seconds.
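    # For example, with disable_failures=2 and disable-window-seconds=3600, a task
    # that fails twice within one hour is moved to DISABLED, and it only becomes
    # schedulable again once disable-persist-seconds (one day by default) have passed.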
disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) retry_count = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable_failures')) disable_hard_timeout = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def _get_retry_policy(self): return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, tracking_url=None, status_message=None, retry_policy='notoptional'): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active)
        self.workers = set()  # worker ids that can perform the task - the task is 'BROKEN' if none of these workers are active
        if deps is None:
            self.deps = set()
        else:
            self.deps = set(deps)
        self.status = status  # PENDING, RUNNING, FAILED or DONE
        self.time = time.time()  # Timestamp when task was first added
        self.updated = self.time
        self.retry = None
        self.remove = None
        self.worker_running = None  # the worker id that is currently running the task or None
        self.time_running = None  # Timestamp when picked up by worker
        self.expl = None
        self.priority = priority
        self.resources = _get_default(resources, {})
        self.family = family
        self.module = module
        self.params = _get_default(params, {})
        self.retry_policy = retry_policy
        self.failures = Failures(self.retry_policy.disable_window)
        self.tracking_url = tracking_url
        self.status_message = status_message
        self.scheduler_disable_time = None
        self.runnable = False
        self.batchable = False
        self.batch_id = None

    def __repr__(self):
        return "Task(%r)" % vars(self)

    # TODO(2017-08-10) replace this function with direct calls to batchable
    # this only exists for backward compatibility
    def is_batchable(self):
        try:
            return self.batchable
        except AttributeError:
            return False

    def add_failure(self):
        self.failures.add_failure()

    def has_excessive_failures(self):
        if self.failures.first_failure_time is not None:
            if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout):
                return True

        logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count)
        if self.failures.num_failures() >= self.retry_policy.retry_count:
            logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count)
            return True

        return False

    @property
    def pretty_id(self):
        param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items())
        return '{}({})'.format(self.family, param_str)


class Worker(object):
    """
    Structure for tracking worker activity and keeping their references.
    """

    def __init__(self, worker_id, last_active=None):
        self.id = worker_id
        self.reference = None  # reference to the worker in the real world. (Currently a dict containing just the host)
        self.last_active = last_active or time.time()  # seconds since epoch
        self.last_get_work = None
        self.started = time.time()  # seconds since epoch
        self.tasks = set()  # task objects
        self.info = {}
        self.disabled = False

    def add_info(self, info):
        self.info.update(info)

    def update(self, worker_reference, get_work=False):
        if worker_reference:
            self.reference = worker_reference
        self.last_active = time.time()
        if get_work:
            self.last_get_work = time.time()

    def prune(self, config):
        # Delete workers that haven't said anything for a while (probably killed)
        if self.last_active + config.worker_disconnect_delay < time.time():
            return True

    def get_pending_tasks(self, state):
        """
        Get PENDING (and RUNNING) tasks for this worker.

        You have to pass in the state for optimization reasons.
        """
        if len(self.tasks) < state.num_pending_tasks():
            return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
                                    self.tasks)
        else:
            return six.moves.filter(lambda task: self.id in task.workers,
                                    state.get_pending_tasks())

    def is_trivial_worker(self, state):
        """
        Return True if this worker is not an assistant and only has tasks without
        resource requirements.

        We have to pass the state parameter for optimization reasons.
        """
        if self.assistant:
            return False
        return all(not task.resources for task in self.get_pending_tasks(state))

    @property
    def assistant(self):
        return self.info.get('assistant', False)

    def __str__(self):
        return self.id


class SimpleTaskState(object):
    """
    Keep track of the current state and handle persistence.

    The point of this class is to enable other ways to keep state, e.g. by using a database.
    These will be implemented by creating an abstract base class that this and other classes
    inherit from.
    """

    def __init__(self, state_path):
        self._state_path = state_path
        self._tasks = {}  # map from id to a Task object
        self._status_tasks = collections.defaultdict(dict)
        self._active_workers = {}  # map from id to a Worker object
        self._task_batchers = {}

    def get_state(self):
        return self._tasks, self._active_workers, self._task_batchers

    def set_state(self, state):
        self._tasks, self._active_workers = state[:2]
        if len(state) >= 3:
            self._task_batchers = state[2]

    def dump(self):
        try:
            with open(self._state_path, 'wb') as fobj:
                pickle.dump(self.get_state(), fobj)
        except IOError:
            logger.warning("Failed saving scheduler state", exc_info=1)
        else:
            logger.info("Saved state in %s", self._state_path)

    # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
    def load(self):
        if os.path.exists(self._state_path):
            logger.info("Attempting to load state from %s", self._state_path)
            try:
                with open(self._state_path, 'rb') as fobj:
                    state = pickle.load(fobj)
            except BaseException:
                logger.exception("Error when loading state. Starting from empty state.")
                return

            self.set_state(state)
            self._status_tasks = collections.defaultdict(dict)
            for task in six.itervalues(self._tasks):
                self._status_tasks[task.status][task.id] = task
        else:
            logger.info("No prior state file exists at %s. Starting with empty state", self._state_path)

    def get_active_tasks(self, status=None):
        if status:
            for task in six.itervalues(self._status_tasks[status]):
                yield task
        else:
            for task in six.itervalues(self._tasks):
                yield task

    def get_batch_running_tasks(self, batch_id):
        assert batch_id is not None
        return [
            task for task in self.get_active_tasks(BATCH_RUNNING)
            if task.batch_id == batch_id
        ]

    def get_running_tasks(self):
        return six.itervalues(self._status_tasks[RUNNING])

    def get_pending_tasks(self):
        return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
                                             for status in [PENDING, RUNNING])

    def set_batcher(self, worker_id, family, batcher_args, max_batch_size):
        self._task_batchers.setdefault(worker_id, {})
        self._task_batchers[worker_id][family] = (batcher_args, max_batch_size)

    def get_batcher(self, worker_id, family):
        return self._task_batchers.get(worker_id, {}).get(family, (None, 1))

    def num_pending_tasks(self):
        """
        Return how many tasks are PENDING + RUNNING. O(1).
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_batch_running(self, task, batch_id, worker_id): self.set_status(task, BATCH_RUNNING) task.batch_id = batch_id task.worker_running = worker_id def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING): return remove_on_failure = task.batch_id is not None and not task.batchable if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if task.status == RUNNING and task.batch_id is not None: for batch_task in self.get_batch_running_tasks(task.batch_id): self.set_status(batch_task, new_status, config) batch_task.batch_id = None task.batch_id = None if new_status == FAILED and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=task.retry_policy.retry_count, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() if new_status == FAILED: task.retry = time.time() + config.retry_delay if remove_on_failure: task.remove = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING): # We don't check for the RUNNING case, because that is already handled # by the fail_dead_worker_task function. 
logger.debug("Task %r has no stakeholders anymore -> might remove " "task in %s seconds", task.id, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() >= task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers.difference_update(workers) def disable_workers(self, workers): self._remove_workers_from_tasks(workers, remove_stakeholders=False) for worker in workers: self.get_worker(worker).disabled = True class Scheduler(object): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy()) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() @rpc_method() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task): logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return not getattr(worker, 'disabled', False) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) @rpc_method() def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')): self._state.set_batcher(worker, task_family, batched_args, max_batch_size) @rpc_method() def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, worker=None, batchable=None, batch_id=None, retry_policy_dict={}, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ assert worker is not None worker_id = worker worker_enabled = self.update(worker_id) retry_policy = self._generate_retry_policy(retry_policy_dict) if worker_enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker_enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if batch_id is not None: task.batch_id = batch_id if status == RUNNING and not task.worker_running: task.worker_running = worker_id if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.tracking_url = tracking_url if batchable is not None: 
task.batchable = batchable if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.expl = expl if not (task.status in (RUNNING, BATCH_RUNNING) and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. # We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if worker_enabled and not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) # Because some tasks (non-dynamic dependencies) are `_make_task`ed # before we know their retry_policy, we always set it here task.retry_policy = retry_policy if runnable and status != FAILED and worker_enabled: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable @rpc_method() def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) @rpc_method() def disable_worker(self, worker): self._state.disable_workers({worker}) @rpc_method() def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _generate_retry_policy(self, task_retry_policy_dict): retry_policy_dict = self._config._get_retry_policy()._asdict() retry_policy_dict.update({k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None}) return RetryPolicy(**retry_policy_dict) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(status=RUNNING): if task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. 
:return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _reset_orphaned_batch_running_tasks(self, worker_id): running_batch_ids = { task.batch_id for task in self._state.get_running_tasks() if task.worker_running == worker_id } orphaned_tasks = [ task for task in self._state.get_active_tasks(BATCH_RUNNING) if task.worker_running == worker_id and task.batch_id not in running_batch_ids ] for task in orphaned_tasks: self._state.set_status(task, PENDING) @rpc_method(allow_null=False) def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. # TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() assert worker is not None worker_id = worker # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1 best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task if current_tasks is not None: # batch running tasks that weren't claimed since the last get_work go back in the pool self._reset_orphaned_batch_running_tasks(worker_id) locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers: upstream_status = self._upstream_status(task.id, upstream_table) if upstream_status != UPSTREAM_DISABLED: 
locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if (best_task and batched_params and task.family == best_task.family and len(batched_tasks) < max_batch_size and task.is_batchable() and all( task.params.get(name) == value for name, value in unbatched_params.items()) and self._schedulable(task)): for name, params in batched_params.items(): params.append(task.params.get(name)) batched_tasks.append(task) if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task batch_param_names, max_batch_size = self._state.get_batcher( worker_id, task.family) if batch_param_names and task.is_batchable(): try: batched_params = { name: [task.params[name]] for name in batch_param_names } unbatched_params = { name: value for name, value in task.params.items() if name not in batched_params } batched_tasks.append(task) except KeyError: batched_params, unbatched_params = None, None else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if len(batched_tasks) > 1: batch_string = '|'.join(task.id for task in batched_tasks) batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest() for task in batched_tasks: self._state.set_batch_running(task, batch_id, worker_id) combined_params = best_task.params.copy() combined_params.update(batched_params) reply['task_id'] = None reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = combined_params reply['batch_id'] = batch_id reply['batch_task_ids'] = [task.id for task in batched_tasks] elif best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply @rpc_method(attempts=1) def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status 
based on the previously calculated child elements upstream_severities = list(upstream_status_table.get(a_task_id) for a_task_id in dep.deps if a_task_id in upstream_status_table) or [''] status = min(upstream_severities, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': getattr(task, "status_message", None) } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret @rpc_method() def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized @rpc_method() def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) @rpc_method() def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) @rpc_method() def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks): return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id @rpc_method() def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), first_task_display_name=self._first_task_display_name(worker), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers @rpc_method() def resource_list(self): """ Resources usage info and their consumers (tasks). 
""" self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_running_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret @rpc_method() def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result @rpc_method() def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized @rpc_method() def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} @rpc_method() def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message if task.status == RUNNING and task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.status_message = status_message @rpc_method() def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
BugsInPy/BugsInPy/temp/projects/luigi/bug-10-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-10-buggy/luigi/luigi/scheduler.py
luigi-bug-23
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains the bindings for command line integration and dynamic loading of tasks """ import argparse import logging import logging.config import optparse import os import sys import tempfile from luigi import configuration from luigi import lock from luigi import parameter from luigi import rpc from luigi import scheduler from luigi import task from luigi import worker from luigi.task_register import Register def setup_interface_logging(conf_file=None): # use a variable in the function object to determine if it has run before if getattr(setup_interface_logging, "has_run", False): return if conf_file is None: logger = logging.getLogger('luigi-interface') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s: %(message)s') stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) else: logging.config.fileConfig(conf_file, disable_existing_loggers=False) setup_interface_logging.has_run = True class core(task.Config): ''' Keeps track of a bunch of environment params. Uses the internal luigi parameter mechanism. The nice thing is that we can instantiate this class and get an object with all the environment variables set. This is arguably a bit of a hack. 
''' use_cmdline_section = False local_scheduler = parameter.BoolParameter( default=False, description='Use local scheduling') scheduler_host = parameter.Parameter( default='localhost', description='Hostname of machine running remote scheduler', config_path=dict(section='core', name='default-scheduler-host')) scheduler_port = parameter.IntParameter( default=8082, description='Port of remote scheduler api process', config_path=dict(section='core', name='default-scheduler-port')) lock_size = parameter.IntParameter( default=1, description="Maximum number of workers running the same command") no_lock = parameter.BoolParameter( default=False, description='Ignore if similar process is already running') lock_pid_dir = parameter.Parameter( default=os.path.join(tempfile.gettempdir(), 'luigi'), description='Directory to store the pid file') workers = parameter.IntParameter( default=1, description='Maximum number of parallel tasks to run') logging_conf_file = parameter.Parameter( default=None, description='Configuration file for logging') module = parameter.Parameter( default=None, description='Used for dynamic loading of modules') # see DynamicArgParseInterface parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.') class WorkerSchedulerFactory(object): def create_local_scheduler(self): return scheduler.CentralPlannerScheduler() def create_remote_scheduler(self, host, port): return rpc.RemoteScheduler(host=host, port=port) def create_worker(self, scheduler, worker_processes, assistant=False): return worker.Worker( scheduler=scheduler, worker_processes=worker_processes, assistant=assistant) class Interface(object): def parse(self): raise NotImplementedError @staticmethod def run(tasks, worker_scheduler_factory=None, override_defaults=None): """ :param tasks: :param worker_scheduler_factory: :param override_defaults: :return: True if all tasks and their dependencies were successfully run (or already completed); False if any error occurred. """ if worker_scheduler_factory is None: worker_scheduler_factory = WorkerSchedulerFactory() if override_defaults is None: override_defaults = {} env_params = core(**override_defaults) # search for logging configuration path first on the command line, then # in the application config file logging_conf = env_params.logging_conf_file if logging_conf is not None and not os.path.exists(logging_conf): raise Exception( "Error: Unable to locate specified logging configuration file!" 
) if not configuration.get_config().getboolean( 'core', 'no_configure_logging', False): setup_interface_logging(logging_conf) if (not env_params.no_lock and not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))): sys.exit(1) if env_params.local_scheduler: sch = worker_scheduler_factory.create_local_scheduler() else: sch = worker_scheduler_factory.create_remote_scheduler( host=env_params.scheduler_host, port=env_params.scheduler_port) w = worker_scheduler_factory.create_worker( scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant) success = True for t in tasks: success &= w.add(t, env_params.parallel_scheduling) logger = logging.getLogger('luigi-interface') logger.info('Done scheduling tasks') if env_params.workers != 0: success &= w.run() w.stop() return success # Simple unweighted Levenshtein distance def _editdistance(a, b): r0 = range(0, len(b) + 1) r1 = [0] * (len(b) + 1) for i in range(0, len(a)): r1[0] = i + 1 for j in range(0, len(b)): c = 0 if a[i] is b[j] else 1 r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c) r0 = r1[:] return r1[len(b)] def error_task_names(task_name, task_names): weighted_tasks = [(_editdistance(task_name, task_name_2), task_name_2) for task_name_2 in task_names] ordered_tasks = sorted(weighted_tasks, key=lambda pair: pair[0]) candidates = [task for (dist, task) in ordered_tasks if dist <= 5 and dist < len(task)] display_string = "" if candidates: display_string = "No task %s. Did you mean:\n%s" % (task_name, '\n'.join(candidates)) else: display_string = "No task %s." % task_name raise SystemExit(display_string) def add_task_parameters(parser, task_cls, optparse=False): for param_name, param in task_cls.get_params(): param.add_to_cmdline_parser(parser, param_name, task_cls.task_family, optparse=optparse, glob=False) def get_global_parameters(): seen_params = set() for task_name, is_without_section, param_name, param in Register.get_all_params(): if param in seen_params: continue seen_params.add(param) yield task_name, is_without_section, param_name, param def add_global_parameters(parser, optparse=False): for task_name, is_without_section, param_name, param in get_global_parameters(): param.add_to_cmdline_parser(parser, param_name, task_name, optparse=optparse, glob=True, is_without_section=is_without_section) def get_task_parameters(task_cls, args): # Parse a str->str dict to the correct types params = {} for param_name, param in task_cls.get_params(): param.parse_from_args(param_name, task_cls.task_family, args, params) return params def set_global_parameters(args): # Note that this is not side effect free for task_name, is_without_section, param_name, param in get_global_parameters(): param.set_global_from_args(param_name, task_name, args, is_without_section=is_without_section) class ArgParseInterface(Interface): """ Takes the task as the command, with parameters specific to it. """ def parse_task(self, cmdline_args=None, main_task_cls=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) if main_task_cls: add_task_parameters(parser, main_task_cls) args = parser.parse_args(args=cmdline_args) task_cls = main_task_cls else: task_names = Register.task_names() # Parse global arguments and pull out the task name. # We used to do this using subparsers+command, but some issues with # argparse across different versions of Python (2.7.9) made it hard. 
args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) if len(unknown) == 0: # In case it included a --help argument, run again parser.parse_known_args(args=cmdline_args) raise SystemExit('No task specified') task_name = unknown[0] if task_name not in task_names: error_task_names(task_name, task_names) task_cls = Register.get_task_cls(task_name) # Add a subparser to parse task-specific arguments subparsers = parser.add_subparsers(dest='command') subparser = subparsers.add_parser(task_name) # Add both task and global params here so that we can support both: # test.py --global-param xyz Test --n 42 # test.py Test --n 42 --global-param xyz add_global_parameters(subparser) add_task_parameters(subparser, task_cls) # Workaround for bug in argparse for Python 2.7.9 # See https://mail.python.org/pipermail/python-dev/2015-January/137699.html subargs = parser.parse_args(args=cmdline_args) for key, value in vars(subargs).items(): if value: # Either True (for boolean args) or non-None (everything else) setattr(args, key, value) # Notice that this is not side effect free because it might set global params set_global_parameters(args) task_params = get_task_parameters(task_cls, args) return [task_cls(**task_params)] def parse(self, cmdline_args=None, main_task_cls=None): return self.parse_task(cmdline_args, main_task_cls) class DynamicArgParseInterface(ArgParseInterface): """ Uses --module as a way to load modules dynamically Usage: .. code-block:: console python whatever.py --module foo_module FooTask --blah xyz --x 123 This will dynamically import foo_module and then try to create FooTask from this. """ def parse(self, cmdline_args=None, main_task_cls=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) module = args.module __import__(module) return self.parse_task(cmdline_args, main_task_cls) class PassThroughOptionParser(optparse.OptionParser): """ An unknown option pass-through implementation of OptionParser. When unknown arguments are encountered, bundle with largs and try again, until rargs is depleted. sys.exit(status) will still be called if a known argument is passed incorrectly (e.g. missing arguments or bad argument types, etc.) """ def _process_args(self, largs, rargs, values): while rargs: try: optparse.OptionParser._process_args(self, largs, rargs, values) except (optparse.BadOptionError, optparse.AmbiguousOptionError) as e: largs.append(e.opt_str) class OptParseInterface(Interface): """ Supported for legacy reasons where it's necessary to interact with an existing parser. Takes the task using --task. All parameters to all possible tasks will be defined globally in a big unordered soup. 
""" def __init__(self, existing_optparse): self.__existing_optparse = existing_optparse def parse(self, cmdline_args=None, main_task_cls=None): parser = PassThroughOptionParser() def add_task_option(p): if main_task_cls: p.add_option('--task', help='Task to run (one of ' + Register.tasks_str() + ') [default: %default]', default=main_task_cls.task_family) else: p.add_option('--task', help='Task to run (one of %s)' % Register.tasks_str()) add_global_parameters(parser, optparse=True) add_task_option(parser) options, args = parser.parse_args(args=cmdline_args) task_cls_name = options.task if self.__existing_optparse: parser = self.__existing_optparse else: parser = optparse.OptionParser() add_task_option(parser) task_cls = Register.get_task_cls(task_cls_name) # Register all parameters as a big mess add_global_parameters(parser, optparse=True) add_task_parameters(parser, task_cls, optparse=True) # Parse and run options, args = parser.parse_args(args=cmdline_args) set_global_parameters(options) task_params = get_task_parameters(task_cls, options) return [task_cls(**task_params)] def run(cmdline_args=None, existing_optparse=None, use_optparse=False, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=False, local_scheduler=False): """ Run from cmdline. The default parser uses argparse however, for legacy reasons, we support optparse that optionally allows for overriding an existing option parser with new args. :param cmdline_args: :param existing_optparse: :param use_optparse: :param main_task_cls: :param worker_scheduler_factory: :param use_dynamic_argparse: :param local_scheduler: """ if use_optparse: interface = OptParseInterface(existing_optparse) elif use_dynamic_argparse: interface = DynamicArgParseInterface() else: interface = ArgParseInterface() tasks = interface.parse(cmdline_args, main_task_cls=main_task_cls) override_defaults = {} if local_scheduler: override_defaults['local_scheduler'] = True return interface.run(tasks, worker_scheduler_factory, override_defaults=override_defaults) def build(tasks, worker_scheduler_factory=None, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: """ if "no_lock" not in env_params: # TODO(erikbern): should we really override args here? env_params["no_lock"] = True Interface.run(tasks, worker_scheduler_factory, env_params) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """ This module contains the bindings for command line integration and dynamic loading of tasks """ import argparse import logging import logging.config import optparse import os import sys import tempfile from luigi import configuration from luigi import lock from luigi import parameter from luigi import rpc from luigi import scheduler from luigi import task from luigi import worker from luigi.task_register import Register def setup_interface_logging(conf_file=None): # use a variable in the function object to determine if it has run before if getattr(setup_interface_logging, "has_run", False): return if conf_file is None: logger = logging.getLogger('luigi-interface') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s: %(message)s') stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) else: logging.config.fileConfig(conf_file, disable_existing_loggers=False) setup_interface_logging.has_run = True class core(task.Config): ''' Keeps track of a bunch of environment params. Uses the internal luigi parameter mechanism. The nice thing is that we can instantiate this class and get an object with all the environment variables set. This is arguably a bit of a hack. ''' use_cmdline_section = False local_scheduler = parameter.BoolParameter( default=False, description='Use local scheduling') scheduler_host = parameter.Parameter( default='localhost', description='Hostname of machine running remote scheduler', config_path=dict(section='core', name='default-scheduler-host')) scheduler_port = parameter.IntParameter( default=8082, description='Port of remote scheduler api process', config_path=dict(section='core', name='default-scheduler-port')) lock_size = parameter.IntParameter( default=1, description="Maximum number of workers running the same command") no_lock = parameter.BoolParameter( default=False, description='Ignore if similar process is already running') lock_pid_dir = parameter.Parameter( default=os.path.join(tempfile.gettempdir(), 'luigi'), description='Directory to store the pid file') workers = parameter.IntParameter( default=1, description='Maximum number of parallel tasks to run') logging_conf_file = parameter.Parameter( default=None, description='Configuration file for logging') module = parameter.Parameter( default=None, description='Used for dynamic loading of modules') # see DynamicArgParseInterface parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.') class WorkerSchedulerFactory(object): def create_local_scheduler(self): return scheduler.CentralPlannerScheduler(prune_on_get_work=True) def create_remote_scheduler(self, host, port): return rpc.RemoteScheduler(host=host, port=port) def create_worker(self, scheduler, worker_processes, assistant=False): return worker.Worker( scheduler=scheduler, worker_processes=worker_processes, assistant=assistant) class Interface(object): def parse(self): raise NotImplementedError @staticmethod def run(tasks, worker_scheduler_factory=None, override_defaults=None): """ :param tasks: :param worker_scheduler_factory: :param override_defaults: :return: True if all tasks and their dependencies were successfully run (or already completed); False if any error occurred. 
""" if worker_scheduler_factory is None: worker_scheduler_factory = WorkerSchedulerFactory() if override_defaults is None: override_defaults = {} env_params = core(**override_defaults) # search for logging configuration path first on the command line, then # in the application config file logging_conf = env_params.logging_conf_file if logging_conf is not None and not os.path.exists(logging_conf): raise Exception( "Error: Unable to locate specified logging configuration file!" ) if not configuration.get_config().getboolean( 'core', 'no_configure_logging', False): setup_interface_logging(logging_conf) if (not env_params.no_lock and not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))): sys.exit(1) if env_params.local_scheduler: sch = worker_scheduler_factory.create_local_scheduler() else: sch = worker_scheduler_factory.create_remote_scheduler( host=env_params.scheduler_host, port=env_params.scheduler_port) w = worker_scheduler_factory.create_worker( scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant) success = True for t in tasks: success &= w.add(t, env_params.parallel_scheduling) logger = logging.getLogger('luigi-interface') logger.info('Done scheduling tasks') if env_params.workers != 0: success &= w.run() w.stop() return success # Simple unweighted Levenshtein distance def _editdistance(a, b): r0 = range(0, len(b) + 1) r1 = [0] * (len(b) + 1) for i in range(0, len(a)): r1[0] = i + 1 for j in range(0, len(b)): c = 0 if a[i] is b[j] else 1 r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c) r0 = r1[:] return r1[len(b)] def error_task_names(task_name, task_names): weighted_tasks = [(_editdistance(task_name, task_name_2), task_name_2) for task_name_2 in task_names] ordered_tasks = sorted(weighted_tasks, key=lambda pair: pair[0]) candidates = [task for (dist, task) in ordered_tasks if dist <= 5 and dist < len(task)] display_string = "" if candidates: display_string = "No task %s. Did you mean:\n%s" % (task_name, '\n'.join(candidates)) else: display_string = "No task %s." % task_name raise SystemExit(display_string) def add_task_parameters(parser, task_cls, optparse=False): for param_name, param in task_cls.get_params(): param.add_to_cmdline_parser(parser, param_name, task_cls.task_family, optparse=optparse, glob=False) def get_global_parameters(): seen_params = set() for task_name, is_without_section, param_name, param in Register.get_all_params(): if param in seen_params: continue seen_params.add(param) yield task_name, is_without_section, param_name, param def add_global_parameters(parser, optparse=False): for task_name, is_without_section, param_name, param in get_global_parameters(): param.add_to_cmdline_parser(parser, param_name, task_name, optparse=optparse, glob=True, is_without_section=is_without_section) def get_task_parameters(task_cls, args): # Parse a str->str dict to the correct types params = {} for param_name, param in task_cls.get_params(): param.parse_from_args(param_name, task_cls.task_family, args, params) return params def set_global_parameters(args): # Note that this is not side effect free for task_name, is_without_section, param_name, param in get_global_parameters(): param.set_global_from_args(param_name, task_name, args, is_without_section=is_without_section) class ArgParseInterface(Interface): """ Takes the task as the command, with parameters specific to it. 
""" def parse_task(self, cmdline_args=None, main_task_cls=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) if main_task_cls: add_task_parameters(parser, main_task_cls) args = parser.parse_args(args=cmdline_args) task_cls = main_task_cls else: task_names = Register.task_names() # Parse global arguments and pull out the task name. # We used to do this using subparsers+command, but some issues with # argparse across different versions of Python (2.7.9) made it hard. args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) if len(unknown) == 0: # In case it included a --help argument, run again parser.parse_known_args(args=cmdline_args) raise SystemExit('No task specified') task_name = unknown[0] if task_name not in task_names: error_task_names(task_name, task_names) task_cls = Register.get_task_cls(task_name) # Add a subparser to parse task-specific arguments subparsers = parser.add_subparsers(dest='command') subparser = subparsers.add_parser(task_name) # Add both task and global params here so that we can support both: # test.py --global-param xyz Test --n 42 # test.py Test --n 42 --global-param xyz add_global_parameters(subparser) add_task_parameters(subparser, task_cls) # Workaround for bug in argparse for Python 2.7.9 # See https://mail.python.org/pipermail/python-dev/2015-January/137699.html subargs = parser.parse_args(args=cmdline_args) for key, value in vars(subargs).items(): if value: # Either True (for boolean args) or non-None (everything else) setattr(args, key, value) # Notice that this is not side effect free because it might set global params set_global_parameters(args) task_params = get_task_parameters(task_cls, args) return [task_cls(**task_params)] def parse(self, cmdline_args=None, main_task_cls=None): return self.parse_task(cmdline_args, main_task_cls) class DynamicArgParseInterface(ArgParseInterface): """ Uses --module as a way to load modules dynamically Usage: .. code-block:: console python whatever.py --module foo_module FooTask --blah xyz --x 123 This will dynamically import foo_module and then try to create FooTask from this. """ def parse(self, cmdline_args=None, main_task_cls=None): if cmdline_args is None: cmdline_args = sys.argv[1:] parser = argparse.ArgumentParser() add_global_parameters(parser) args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help']) module = args.module __import__(module) return self.parse_task(cmdline_args, main_task_cls) class PassThroughOptionParser(optparse.OptionParser): """ An unknown option pass-through implementation of OptionParser. When unknown arguments are encountered, bundle with largs and try again, until rargs is depleted. sys.exit(status) will still be called if a known argument is passed incorrectly (e.g. missing arguments or bad argument types, etc.) """ def _process_args(self, largs, rargs, values): while rargs: try: optparse.OptionParser._process_args(self, largs, rargs, values) except (optparse.BadOptionError, optparse.AmbiguousOptionError) as e: largs.append(e.opt_str) class OptParseInterface(Interface): """ Supported for legacy reasons where it's necessary to interact with an existing parser. Takes the task using --task. All parameters to all possible tasks will be defined globally in a big unordered soup. 
""" def __init__(self, existing_optparse): self.__existing_optparse = existing_optparse def parse(self, cmdline_args=None, main_task_cls=None): parser = PassThroughOptionParser() def add_task_option(p): if main_task_cls: p.add_option('--task', help='Task to run (one of ' + Register.tasks_str() + ') [default: %default]', default=main_task_cls.task_family) else: p.add_option('--task', help='Task to run (one of %s)' % Register.tasks_str()) add_global_parameters(parser, optparse=True) add_task_option(parser) options, args = parser.parse_args(args=cmdline_args) task_cls_name = options.task if self.__existing_optparse: parser = self.__existing_optparse else: parser = optparse.OptionParser() add_task_option(parser) task_cls = Register.get_task_cls(task_cls_name) # Register all parameters as a big mess add_global_parameters(parser, optparse=True) add_task_parameters(parser, task_cls, optparse=True) # Parse and run options, args = parser.parse_args(args=cmdline_args) set_global_parameters(options) task_params = get_task_parameters(task_cls, options) return [task_cls(**task_params)] def run(cmdline_args=None, existing_optparse=None, use_optparse=False, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=False, local_scheduler=False): """ Run from cmdline. The default parser uses argparse however, for legacy reasons, we support optparse that optionally allows for overriding an existing option parser with new args. :param cmdline_args: :param existing_optparse: :param use_optparse: :param main_task_cls: :param worker_scheduler_factory: :param use_dynamic_argparse: :param local_scheduler: """ if use_optparse: interface = OptParseInterface(existing_optparse) elif use_dynamic_argparse: interface = DynamicArgParseInterface() else: interface = ArgParseInterface() tasks = interface.parse(cmdline_args, main_task_cls=main_task_cls) override_defaults = {} if local_scheduler: override_defaults['local_scheduler'] = True return interface.run(tasks, worker_scheduler_factory, override_defaults=override_defaults) def build(tasks, worker_scheduler_factory=None, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: """ if "no_lock" not in env_params: # TODO(erikbern): should we really override args here? env_params["no_lock"] = True Interface.run(tasks, worker_scheduler_factory, env_params)
BugsInPy/BugsInPy/temp/projects/luigi/bug-23-fixed/luigi/luigi/interface.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-23-buggy/luigi/luigi/interface.py
luigi-bug-1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Simple REST server that takes commands in a JSON payload Interface to the :py:class:`~luigi.scheduler.Scheduler` class. See :doc:`/central_scheduler` for more info. """ # # Description: Added codes for visualization of how long each task takes # running-time until it reaches the next status (failed or done) # At "{base_url}/tasklist", all completed(failed or done) tasks are shown. # At "{base_url}/tasklist", a user can select one specific task to see # how its running-time has changed over time. # At "{base_url}/tasklist/{task_name}", it visualizes a multi-bar graph # that represents the changes of the running-time for a selected task # up to the next status (failed or done). # This visualization let us know how the running-time of the specific task # has changed over time. # # Copyright 2015 Naver Corp. # Author Yeseul Park ([email protected]) # import atexit import datetime import json import logging import os import signal import sys import time import pkg_resources import tornado.httpserver import tornado.ioloop import tornado.netutil import tornado.web from luigi import Config, parameter from luigi.scheduler import Scheduler, RPC_METHODS logger = logging.getLogger("luigi.server") class cors(Config): enabled = parameter.BoolParameter( default=False, description='Enables CORS support.') allowed_origins = parameter.ListParameter( default=[], description='A list of allowed origins. Used only if `allow_any_origin` is false.') allow_any_origin = parameter.BoolParameter( default=False, description='Accepts requests from any origin.') allow_null_origin = parameter.BoolParameter( default=False, description='Allows the request to set `null` value of the `Origin` header.') max_age = parameter.IntParameter( default=86400, description='Content of `Access-Control-Max-Age`.') allowed_methods = parameter.Parameter( default='GET, OPTIONS', description='Content of `Access-Control-Allow-Methods`.') allowed_headers = parameter.Parameter( default='Accept, Content-Type, Origin', description='Content of `Access-Control-Allow-Headers`.') exposed_headers = parameter.Parameter( default='', description='Content of `Access-Control-Expose-Headers`.') allow_credentials = parameter.BoolParameter( default=False, description='Indicates that the actual request can include user credentials.') def __init__(self, *args, **kwargs): super(cors, self).__init__(*args, **kwargs) self.allowed_origins = set(i for i in self.allowed_origins if i not in ['*', 'null']) class RPCHandler(tornado.web.RequestHandler): """ Handle remote scheduling calls using rpc.RemoteSchedulerResponder. 
""" def __init__(self, *args, **kwargs): super(RPCHandler, self).__init__(*args, **kwargs) self._cors_config = cors() def initialize(self, scheduler): self._scheduler = scheduler def options(self, *args): if self._cors_config.enabled: self._handle_cors_preflight() self.set_status(204) self.finish() def get(self, method): if method not in RPC_METHODS: self.send_error(404) return payload = self.get_argument('data', default="{}") arguments = json.loads(payload) if hasattr(self._scheduler, method): result = getattr(self._scheduler, method)(**arguments) if self._cors_config.enabled: self._handle_cors() self.write({"response": result}) # wrap all json response in a dictionary else: self.send_error(404) post = get def _handle_cors_preflight(self): origin = self.request.headers.get('Origin') if not origin: return if origin == 'null': if self._cors_config.allow_null_origin: self.set_header('Access-Control-Allow-Origin', 'null') self._set_other_cors_headers() else: if self._cors_config.allow_any_origin: self.set_header('Access-Control-Allow-Origin', '*') self._set_other_cors_headers() elif origin in self._cors_config.allowed_origins: self.set_header('Access-Control-Allow-Origin', origin) self._set_other_cors_headers() def _handle_cors(self): origin = self.request.headers.get('Origin') if not origin: return if origin == 'null': if self._cors_config.allow_null_origin: self.set_header('Access-Control-Allow-Origin', 'null') else: if self._cors_config.allow_any_origin: self.set_header('Access-Control-Allow-Origin', '*') elif origin in self._cors_config.allowed_origins: self.set_header('Access-Control-Allow-Origin', origin) self.set_header('Vary', 'Origin') def _set_other_cors_headers(self): self.set_header('Access-Control-Max-Age', str(self._cors_config.max_age)) self.set_header('Access-Control-Allow-Methods', self._cors_config.allowed_methods) self.set_header('Access-Control-Allow-Headers', self._cors_config.allowed_headers) if self._cors_config.allow_credentials: self.set_header('Access-Control-Allow-Credentials', 'true') if self._cors_config.exposed_headers: self.set_header('Access-Control-Expose-Headers', self._cors_config.exposed_headers) class BaseTaskHistoryHandler(tornado.web.RequestHandler): def initialize(self, scheduler): self._scheduler = scheduler def get_template_path(self): return pkg_resources.resource_filename(__name__, 'templates') class AllRunHandler(BaseTaskHistoryHandler): def get(self): all_tasks = self._scheduler.task_history.find_all_runs() tasknames = [task.name for task in all_tasks] # show all tasks with their name list to be selected # why all tasks? the duration of the event history of a selected task # can be more than 24 hours. 
self.render("menu.html", tasknames=tasknames) class SelectedRunHandler(BaseTaskHistoryHandler): def get(self, name): statusResults = {} taskResults = [] # get all tasks that has been updated all_tasks = self._scheduler.task_history.find_all_runs() # get events history for all tasks all_tasks_event_history = self._scheduler.task_history.find_all_events() # build the dictionary tasks with index: id, value: task_name tasks = {task.id: str(task.name) for task in all_tasks} for task in all_tasks_event_history: # if the name of user-selected task is in tasks, get its task_id if tasks.get(task.task_id) == str(name): status = str(task.event_name) if status not in statusResults: statusResults[status] = [] # append the id, task_id, ts, y with 0, next_process with null # for the status(running/failed/done) of the selected task statusResults[status].append(({ 'id': str(task.id), 'task_id': str(task.task_id), 'x': from_utc(str(task.ts)), 'y': 0, 'next_process': ''})) # append the id, task_name, task_id, status, datetime, timestamp # for the selected task taskResults.append({ 'id': str(task.id), 'taskName': str(name), 'task_id': str(task.task_id), 'status': str(task.event_name), 'datetime': str(task.ts), 'timestamp': from_utc(str(task.ts))}) statusResults = json.dumps(statusResults) taskResults = json.dumps(taskResults) statusResults = tornado.escape.xhtml_unescape(str(statusResults)) taskResults = tornado.escape.xhtml_unescape(str(taskResults)) self.render('history.html', name=name, statusResults=statusResults, taskResults=taskResults) def from_utc(utcTime, fmt=None): """convert UTC time string to time.struct_time: change datetime.datetime to time, return time.struct_time type""" if fmt is None: try_formats = ["%Y-%m-%d %H:%M:%S.%f", "%Y-%m-%d %H:%M:%S"] else: try_formats = [fmt] for fmt in try_formats: try: time_struct = datetime.datetime.strptime(utcTime, fmt) except ValueError: pass else: date = int(time.mktime(time_struct.timetuple())) return date else: raise ValueError("No UTC format matches {}".format(utcTime)) class RecentRunHandler(BaseTaskHistoryHandler): def get(self): tasks = self._scheduler.task_history.find_latest_runs() self.render("recent.html", tasks=tasks) class ByNameHandler(BaseTaskHistoryHandler): def get(self, name): tasks = self._scheduler.task_history.find_all_by_name(name) self.render("recent.html", tasks=tasks) class ByIdHandler(BaseTaskHistoryHandler): def get(self, id): task = self._scheduler.task_history.find_task_by_id(id) self.render("show.html", task=task) class ByParamsHandler(BaseTaskHistoryHandler): def get(self, name): payload = self.get_argument('data', default="{}") arguments = json.loads(payload) tasks = self._scheduler.task_history.find_all_by_parameters(name, session=None, **arguments) self.render("recent.html", tasks=tasks) class RootPathHandler(BaseTaskHistoryHandler): def get(self): self.redirect("/static/visualiser/index.html") class MetricsHandler(tornado.web.RequestHandler): def initialize(self, scheduler): self._scheduler = scheduler def get(self): metrics = self._scheduler._state._metrics_collector.generate_latest() if metrics: metrics.configure_http_handler(self) self.write(metrics) def app(scheduler): settings = {"static_path": os.path.join(os.path.dirname(__file__), "static"), "unescape": tornado.escape.xhtml_unescape, "compress_response": True, } handlers = [ (r'/api/(.*)', RPCHandler, {"scheduler": scheduler}), (r'/', RootPathHandler, {'scheduler': scheduler}), (r'/tasklist', AllRunHandler, {'scheduler': scheduler}), (r'/tasklist/(.*?)', 
SelectedRunHandler, {'scheduler': scheduler}), (r'/history', RecentRunHandler, {'scheduler': scheduler}), (r'/history/by_name/(.*?)', ByNameHandler, {'scheduler': scheduler}), (r'/history/by_id/(.*?)', ByIdHandler, {'scheduler': scheduler}), (r'/history/by_params/(.*?)', ByParamsHandler, {'scheduler': scheduler}), (r'/metrics', MetricsHandler, {'scheduler': scheduler}) ] api_app = tornado.web.Application(handlers, **settings) return api_app def _init_api(scheduler, api_port=None, address=None, unix_socket=None): api_app = app(scheduler) if unix_socket is not None: api_sockets = [tornado.netutil.bind_unix_socket(unix_socket)] else: api_sockets = tornado.netutil.bind_sockets(api_port, address=address) server = tornado.httpserver.HTTPServer(api_app) server.add_sockets(api_sockets) # Return the bound socket names. Useful for connecting client in test scenarios. return [s.getsockname() for s in api_sockets] def run(api_port=8082, address=None, unix_socket=None, scheduler=None): """ Runs one instance of the API server. """ if scheduler is None: scheduler = Scheduler() # load scheduler state scheduler.load() _init_api( scheduler=scheduler, api_port=api_port, address=address, unix_socket=unix_socket, ) # prune work DAG every 60 seconds pruner = tornado.ioloop.PeriodicCallback(scheduler.prune, 60000) pruner.start() def shutdown_handler(signum, frame): exit_handler() sys.exit(0) @atexit.register def exit_handler(): logger.info("Scheduler instance shutting down") scheduler.dump() stop() signal.signal(signal.SIGINT, shutdown_handler) signal.signal(signal.SIGTERM, shutdown_handler) if os.name == 'nt': signal.signal(signal.SIGBREAK, shutdown_handler) else: signal.signal(signal.SIGQUIT, shutdown_handler) logger.info("Scheduler starting up") tornado.ioloop.IOLoop.instance().start() def stop(): tornado.ioloop.IOLoop.instance().stop() if __name__ == "__main__": run() # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Simple REST server that takes commands in a JSON payload Interface to the :py:class:`~luigi.scheduler.Scheduler` class. See :doc:`/central_scheduler` for more info. """ # # Description: Added codes for visualization of how long each task takes # running-time until it reaches the next status (failed or done) # At "{base_url}/tasklist", all completed(failed or done) tasks are shown. # At "{base_url}/tasklist", a user can select one specific task to see # how its running-time has changed over time. # At "{base_url}/tasklist/{task_name}", it visualizes a multi-bar graph # that represents the changes of the running-time for a selected task # up to the next status (failed or done). # This visualization let us know how the running-time of the specific task # has changed over time. # # Copyright 2015 Naver Corp. 
# Author Yeseul Park ([email protected]) # import atexit import datetime import json import logging import os import signal import sys import time import pkg_resources import tornado.httpserver import tornado.ioloop import tornado.netutil import tornado.web from luigi import Config, parameter from luigi.scheduler import Scheduler, RPC_METHODS logger = logging.getLogger("luigi.server") class cors(Config): enabled = parameter.BoolParameter( default=False, description='Enables CORS support.') allowed_origins = parameter.ListParameter( default=[], description='A list of allowed origins. Used only if `allow_any_origin` is false.') allow_any_origin = parameter.BoolParameter( default=False, description='Accepts requests from any origin.') allow_null_origin = parameter.BoolParameter( default=False, description='Allows the request to set `null` value of the `Origin` header.') max_age = parameter.IntParameter( default=86400, description='Content of `Access-Control-Max-Age`.') allowed_methods = parameter.Parameter( default='GET, OPTIONS', description='Content of `Access-Control-Allow-Methods`.') allowed_headers = parameter.Parameter( default='Accept, Content-Type, Origin', description='Content of `Access-Control-Allow-Headers`.') exposed_headers = parameter.Parameter( default='', description='Content of `Access-Control-Expose-Headers`.') allow_credentials = parameter.BoolParameter( default=False, description='Indicates that the actual request can include user credentials.') def __init__(self, *args, **kwargs): super(cors, self).__init__(*args, **kwargs) self.allowed_origins = set(i for i in self.allowed_origins if i not in ['*', 'null']) class RPCHandler(tornado.web.RequestHandler): """ Handle remote scheduling calls using rpc.RemoteSchedulerResponder. """ def __init__(self, *args, **kwargs): super(RPCHandler, self).__init__(*args, **kwargs) self._cors_config = cors() def initialize(self, scheduler): self._scheduler = scheduler def options(self, *args): if self._cors_config.enabled: self._handle_cors_preflight() self.set_status(204) self.finish() def get(self, method): if method not in RPC_METHODS: self.send_error(404) return payload = self.get_argument('data', default="{}") arguments = json.loads(payload) if hasattr(self._scheduler, method): result = getattr(self._scheduler, method)(**arguments) if self._cors_config.enabled: self._handle_cors() self.write({"response": result}) # wrap all json response in a dictionary else: self.send_error(404) post = get def _handle_cors_preflight(self): origin = self.request.headers.get('Origin') if not origin: return if origin == 'null': if self._cors_config.allow_null_origin: self.set_header('Access-Control-Allow-Origin', 'null') self._set_other_cors_headers() else: if self._cors_config.allow_any_origin: self.set_header('Access-Control-Allow-Origin', '*') self._set_other_cors_headers() elif origin in self._cors_config.allowed_origins: self.set_header('Access-Control-Allow-Origin', origin) self._set_other_cors_headers() def _handle_cors(self): origin = self.request.headers.get('Origin') if not origin: return if origin == 'null': if self._cors_config.allow_null_origin: self.set_header('Access-Control-Allow-Origin', 'null') else: if self._cors_config.allow_any_origin: self.set_header('Access-Control-Allow-Origin', '*') elif origin in self._cors_config.allowed_origins: self.set_header('Access-Control-Allow-Origin', origin) self.set_header('Vary', 'Origin') def _set_other_cors_headers(self): self.set_header('Access-Control-Max-Age', str(self._cors_config.max_age)) 
self.set_header('Access-Control-Allow-Methods', self._cors_config.allowed_methods) self.set_header('Access-Control-Allow-Headers', self._cors_config.allowed_headers) if self._cors_config.allow_credentials: self.set_header('Access-Control-Allow-Credentials', 'true') if self._cors_config.exposed_headers: self.set_header('Access-Control-Expose-Headers', self._cors_config.exposed_headers) class BaseTaskHistoryHandler(tornado.web.RequestHandler): def initialize(self, scheduler): self._scheduler = scheduler def get_template_path(self): return pkg_resources.resource_filename(__name__, 'templates') class AllRunHandler(BaseTaskHistoryHandler): def get(self): all_tasks = self._scheduler.task_history.find_all_runs() tasknames = [task.name for task in all_tasks] # show all tasks with their name list to be selected # why all tasks? the duration of the event history of a selected task # can be more than 24 hours. self.render("menu.html", tasknames=tasknames) class SelectedRunHandler(BaseTaskHistoryHandler): def get(self, name): statusResults = {} taskResults = [] # get all tasks that has been updated all_tasks = self._scheduler.task_history.find_all_runs() # get events history for all tasks all_tasks_event_history = self._scheduler.task_history.find_all_events() # build the dictionary tasks with index: id, value: task_name tasks = {task.id: str(task.name) for task in all_tasks} for task in all_tasks_event_history: # if the name of user-selected task is in tasks, get its task_id if tasks.get(task.task_id) == str(name): status = str(task.event_name) if status not in statusResults: statusResults[status] = [] # append the id, task_id, ts, y with 0, next_process with null # for the status(running/failed/done) of the selected task statusResults[status].append(({ 'id': str(task.id), 'task_id': str(task.task_id), 'x': from_utc(str(task.ts)), 'y': 0, 'next_process': ''})) # append the id, task_name, task_id, status, datetime, timestamp # for the selected task taskResults.append({ 'id': str(task.id), 'taskName': str(name), 'task_id': str(task.task_id), 'status': str(task.event_name), 'datetime': str(task.ts), 'timestamp': from_utc(str(task.ts))}) statusResults = json.dumps(statusResults) taskResults = json.dumps(taskResults) statusResults = tornado.escape.xhtml_unescape(str(statusResults)) taskResults = tornado.escape.xhtml_unescape(str(taskResults)) self.render('history.html', name=name, statusResults=statusResults, taskResults=taskResults) def from_utc(utcTime, fmt=None): """convert UTC time string to time.struct_time: change datetime.datetime to time, return time.struct_time type""" if fmt is None: try_formats = ["%Y-%m-%d %H:%M:%S.%f", "%Y-%m-%d %H:%M:%S"] else: try_formats = [fmt] for fmt in try_formats: try: time_struct = datetime.datetime.strptime(utcTime, fmt) except ValueError: pass else: date = int(time.mktime(time_struct.timetuple())) return date else: raise ValueError("No UTC format matches {}".format(utcTime)) class RecentRunHandler(BaseTaskHistoryHandler): def get(self): tasks = self._scheduler.task_history.find_latest_runs() self.render("recent.html", tasks=tasks) class ByNameHandler(BaseTaskHistoryHandler): def get(self, name): tasks = self._scheduler.task_history.find_all_by_name(name) self.render("recent.html", tasks=tasks) class ByIdHandler(BaseTaskHistoryHandler): def get(self, id): task = self._scheduler.task_history.find_task_by_id(id) self.render("show.html", task=task) class ByParamsHandler(BaseTaskHistoryHandler): def get(self, name): payload = self.get_argument('data', default="{}") 
arguments = json.loads(payload) tasks = self._scheduler.task_history.find_all_by_parameters(name, session=None, **arguments) self.render("recent.html", tasks=tasks) class RootPathHandler(BaseTaskHistoryHandler): def get(self): self.redirect("/static/visualiser/index.html") class MetricsHandler(tornado.web.RequestHandler): def initialize(self, scheduler): self._scheduler = scheduler def get(self): metrics_collector = self._scheduler._state._metrics_collector metrics = metrics_collector.generate_latest() if metrics: metrics_collector.configure_http_handler(self) self.write(metrics) def app(scheduler): settings = {"static_path": os.path.join(os.path.dirname(__file__), "static"), "unescape": tornado.escape.xhtml_unescape, "compress_response": True, } handlers = [ (r'/api/(.*)', RPCHandler, {"scheduler": scheduler}), (r'/', RootPathHandler, {'scheduler': scheduler}), (r'/tasklist', AllRunHandler, {'scheduler': scheduler}), (r'/tasklist/(.*?)', SelectedRunHandler, {'scheduler': scheduler}), (r'/history', RecentRunHandler, {'scheduler': scheduler}), (r'/history/by_name/(.*?)', ByNameHandler, {'scheduler': scheduler}), (r'/history/by_id/(.*?)', ByIdHandler, {'scheduler': scheduler}), (r'/history/by_params/(.*?)', ByParamsHandler, {'scheduler': scheduler}), (r'/metrics', MetricsHandler, {'scheduler': scheduler}) ] api_app = tornado.web.Application(handlers, **settings) return api_app def _init_api(scheduler, api_port=None, address=None, unix_socket=None): api_app = app(scheduler) if unix_socket is not None: api_sockets = [tornado.netutil.bind_unix_socket(unix_socket)] else: api_sockets = tornado.netutil.bind_sockets(api_port, address=address) server = tornado.httpserver.HTTPServer(api_app) server.add_sockets(api_sockets) # Return the bound socket names. Useful for connecting client in test scenarios. return [s.getsockname() for s in api_sockets] def run(api_port=8082, address=None, unix_socket=None, scheduler=None): """ Runs one instance of the API server. """ if scheduler is None: scheduler = Scheduler() # load scheduler state scheduler.load() _init_api( scheduler=scheduler, api_port=api_port, address=address, unix_socket=unix_socket, ) # prune work DAG every 60 seconds pruner = tornado.ioloop.PeriodicCallback(scheduler.prune, 60000) pruner.start() def shutdown_handler(signum, frame): exit_handler() sys.exit(0) @atexit.register def exit_handler(): logger.info("Scheduler instance shutting down") scheduler.dump() stop() signal.signal(signal.SIGINT, shutdown_handler) signal.signal(signal.SIGTERM, shutdown_handler) if os.name == 'nt': signal.signal(signal.SIGBREAK, shutdown_handler) else: signal.signal(signal.SIGQUIT, shutdown_handler) logger.info("Scheduler starting up") tornado.ioloop.IOLoop.instance().start() def stop(): tornado.ioloop.IOLoop.instance().stop() if __name__ == "__main__": run()
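# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original server.py). `_init_api` above
# returns the bound socket names precisely so test code can discover where the
# server is listening. Binding port 0 to obtain an OS-assigned ephemeral port
# is an assumption of this sketch, as is calling the private `_init_api`
# helper directly rather than the public `run()`.
from luigi.scheduler import Scheduler
from luigi.server import _init_api

scheduler = Scheduler()
socket_names = _init_api(scheduler, api_port=0, address='127.0.0.1')
# For an IPv4 bind, each socket name is an (address, port) tuple.
host, port = socket_names[0][:2]
# Requests to http://<host>:<port>/api/... are served once the IOLoop runs,
# e.g. via tornado.ioloop.IOLoop.instance().start().
# ---------------------------------------------------------------------------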
BugsInPy/BugsInPy/temp/projects/luigi/bug-1-fixed/luigi/luigi/server.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-1-buggy/luigi/luigi/server.py
luigi-bug-7
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections import inspect import json from luigi.batch_notifier import BatchNotifier try: import cPickle as pickle except ImportError: import pickle import functools import hashlib import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \ BATCH_RUNNING from luigi.task import Config logger = logging.getLogger(__name__) UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, BATCH_RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } WORKER_STATE_DISABLED = 'disabled' WORKER_STATE_ACTIVE = 'active' TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') RPC_METHODS = {} _retry_policy_fields = [ "retry_count", "disable_hard_timeout", "disable_window", ] RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields) def _get_empty_retry_policy(): return RetryPolicy(*[None] * len(_retry_policy_fields)) def rpc_method(**request_args): def _rpc_method(fn): # If request args are passed, return this function again for use as # the decorator function with the request args attached. fn_args = inspect.getargspec(fn) assert not fn_args.varargs assert fn_args.args[0] == 'self' all_args = fn_args.args[1:] defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ()))) required_args = frozenset(arg for arg in all_args if arg not in defaults) fn_name = fn.__name__ @functools.wraps(fn) def rpc_func(self, *args, **kwargs): actual_args = defaults.copy() actual_args.update(dict(zip(all_args, args))) actual_args.update(kwargs) if not all(arg in actual_args for arg in required_args): raise TypeError('{} takes {} arguments ({} given)'.format( fn_name, len(all_args), len(actual_args))) return self._request('/api/{}'.format(fn_name), actual_args, **request_args) RPC_METHODS[fn_name] = rpc_func return fn return _rpc_method class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. 
We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') batch_emails = parameter.BoolParameter(default=False, description="Send e-mails in batches rather than immediately") # Jobs are disabled if we see more than retry_count failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) retry_count = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable_failures')) disable_hard_timeout = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def _get_retry_policy(self): return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. 
""" self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class OrderedSet(collections.MutableSet): """ Standard Python OrderedSet recipe found at http://code.activestate.com/recipes/576694/ Modified to include a peek function to get the last element """ def __init__(self, iterable=None): self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable def __len__(self): return len(self.map) def __contains__(self, key): return key in self.map def add(self, key): if key not in self.map: end = self.end curr = end[1] curr[2] = end[1] = self.map[key] = [key, curr, end] def discard(self, key): if key in self.map: key, prev, next = self.map.pop(key) prev[2] = next next[1] = prev def __iter__(self): end = self.end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def peek(self, last=True): if not self: raise KeyError('set is empty') key = self.end[1][0] if last else self.end[2][0] return key def pop(self, last=True): key = self.peek(last) self.discard(key) return key def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): if isinstance(other, OrderedSet): return len(self) == len(other) and list(self) == list(other) return set(self) == set(other) class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, tracking_url=None, status_message=None, retry_policy='notoptional'): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = OrderedSet() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.retry_policy = retry_policy self.failures = Failures(self.retry_policy.disable_window) self.tracking_url = tracking_url self.status_message = status_message self.scheduler_disable_time = None self.runnable = False self.batchable = False self.batch_id = None def __repr__(self): return "Task(%r)" % vars(self) # TODO(2017-08-10) replace this function with direct calls to batchable # this only exists for backward compatibility def is_batchable(self): try: return self.batchable except AttributeError: return False def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if self.failures.first_failure_time is not None: if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout): return True logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count) if self.failures.num_failures() >= self.retry_policy.retry_count: logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count) return True return False @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in sorted(self.params.items())) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False self.rpc_messages = [] def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_tasks(self, state, *statuses): num_self_tasks = len(self.tasks) num_state_tasks = sum(len(state._status_tasks[status]) for status in statuses) if num_self_tasks < num_state_tasks: return six.moves.filter(lambda task: task.status in statuses, self.tasks) else: return six.moves.filter(lambda task: self.id in task.workers, state.get_active_tasks_by_status(*statuses)) def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. 
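        In other words: a non-assistant worker whose PENDING tasks all declare
        no resources can be handed any of its tasks, so get_work() can skip the
        global resource bookkeeping for it.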
""" if self.assistant: return False return all(not task.resources for task in self.get_tasks(state, PENDING)) @property def assistant(self): return self.info.get('assistant', False) @property def enabled(self): return not self.disabled @property def state(self): if self.enabled: return WORKER_STATE_ACTIVE else: return WORKER_STATE_DISABLED def add_rpc_message(self, name, **kwargs): # the message has the format {'name': <function_name>, 'kwargs': <function_kwargs>} self.rpc_messages.append({'name': name, 'kwargs': kwargs}) def fetch_rpc_messages(self): messages = self.rpc_messages[:] del self.rpc_messages[:] return messages def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object self._task_batchers = {} def get_state(self): return self._tasks, self._active_workers, self._task_batchers def set_state(self, state): self._tasks, self._active_workers = state[:2] if len(state) >= 3: self._task_batchers = state[2] def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from empty state.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with empty state", self._state_path) def get_active_tasks(self): return six.itervalues(self._tasks) def get_active_tasks_by_status(self, *statuses): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in statuses) def get_batch_running_tasks(self, batch_id): assert batch_id is not None return [ task for task in self.get_active_tasks_by_status(BATCH_RUNNING) if task.batch_id == batch_id ] def set_batcher(self, worker_id, family, batcher_args, max_batch_size): self._task_batchers.setdefault(worker_id, {}) self._task_batchers[worker_id][family] = (batcher_args, max_batch_size) def get_batcher(self, worker_id, family): return self._task_batchers.get(worker_id, {}).get(family, (None, 1)) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_batch_running(self, task, batch_id, worker_id): self.set_status(task, BATCH_RUNNING) task.batch_id = batch_id task.worker_running = worker_id task.resources_running = task.resources task.time_running = time.time() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING): return remove_on_failure = task.batch_id is not None and not task.batchable if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if task.status == RUNNING and task.batch_id is not None and new_status != RUNNING: for batch_task in self.get_batch_running_tasks(task.batch_id): self.set_status(batch_task, new_status, config) batch_task.batch_id = None task.batch_id = None if new_status == FAILED and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED if not config.batch_emails: notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=task.retry_policy.retry_count, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() if new_status == FAILED: task.retry = time.time() + config.retry_delay if remove_on_failure: task.remove = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status in (BATCH_RUNNING, RUNNING) and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING): # We don't check for the RUNNING case, because that is already handled # by the fail_dead_worker_task function. 
logger.debug("Task %r has no stakeholders anymore -> might remove " "task in %s seconds", task.id, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() >= task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = worker.last_get_work if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers -= workers def disable_workers(self, worker_ids): self._remove_workers_from_tasks(worker_ids, remove_stakeholders=False) for worker_id in worker_ids: self.get_worker(worker_id).disabled = True class Scheduler(object): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy()) self._worker_requests = {} self._paused = False if self._config.batch_emails: self._email_batcher = BatchNotifier() def load(self): self._state.load() def dump(self): self._state.dump() if self._config.batch_emails: self._email_batcher.send_email() @rpc_method() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() self._prune_emails() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task): logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def _prune_emails(self): if self._config.batch_emails: self._email_batcher.update() def _update_worker(self, worker_id, worker_reference=None, get_work=False): # Keep track of whenever the worker was last active. # For convenience also return the worker object. worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return worker def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. 
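        Note that the bump is propagated recursively to all known dependencies,
        so a high-priority task cannot be starved by low-priority prerequisites.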
""" task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) @rpc_method() def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')): self._state.set_batcher(worker, task_family, batched_args, max_batch_size) @rpc_method() def forgive_failures(self, task_id=None): status = PENDING task = self._state.get_task(task_id) if task is None: return {"task_id": task_id, "status": None} # we forgive only failures if task.status == FAILED: # forgive but do not forget self._update_task_history(task, status) self._state.set_status(task, status, self._config) return {"task_id": task_id, "status": task.status} @rpc_method() def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, worker=None, batchable=None, batch_id=None, retry_policy_dict={}, owners=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ assert worker is not None worker_id = worker worker = self._update_worker(worker_id) retry_policy = self._generate_retry_policy(retry_policy_dict) if worker.enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker.enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if batch_id is not None: task.batch_id = batch_id if status == RUNNING and not task.worker_running: task.worker_running = worker_id if batch_id: task.resources_running = self._state.get_batch_running_tasks(batch_id)[0].resources_running task.time_running = time.time() if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.tracking_url = tracking_url if batchable is not None: task.batchable = batchable if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.expl = expl if not (task.status in (RUNNING, BATCH_RUNNING) and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)

        if status == FAILED and self._config.batch_emails:
            batched_params, _ = self._state.get_batcher(worker_id, family)
            if batched_params:
                unbatched_params = {
                    param: value
                    for param, value in six.iteritems(task.params)
                    if param not in batched_params
                }
            else:
                unbatched_params = task.params
            try:
                expl_raw = json.loads(expl)
            except ValueError:
                expl_raw = expl
            self._email_batcher.add_failure(
                task.pretty_id, task.family, unbatched_params, expl_raw, owners)
            if task.status == DISABLED:
                self._email_batcher.add_disable(
                    task.pretty_id, task.family, unbatched_params, owners)

        if deps is not None:
            task.deps = set(deps)

        if new_deps is not None:
            task.deps.update(new_deps)

        if resources is not None:
            task.resources = resources

        if worker.enabled and not assistant:
            task.stakeholders.add(worker_id)

            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)

        self._update_priority(task, priority, worker_id)

        # Because some tasks (non-dynamic dependencies) are `_make_task`ed
        # before we know their retry_policy, we always set it here
        task.retry_policy = retry_policy

        if runnable and status != FAILED and worker.enabled:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable

    @rpc_method()
    def announce_scheduling_failure(self, task_name, family, params, expl, owners, **kwargs):
        if not self._config.batch_emails:
            return
        worker_id = kwargs['worker']
        batched_params, _ = self._state.get_batcher(worker_id, family)
        if batched_params:
            unbatched_params = {
                param: value
                for param, value in six.iteritems(params)
                if param not in batched_params
            }
        else:
            unbatched_params = params
        self._email_batcher.add_scheduling_fail(task_name, family, unbatched_params, expl, owners)

    @rpc_method()
    def add_worker(self, worker, info, **kwargs):
        self._state.get_worker(worker).add_info(info)

    @rpc_method()
    def disable_worker(self, worker):
        self._state.disable_workers({worker})

    @rpc_method()
    def set_worker_processes(self, worker, n):
        self._state.get_worker(worker).add_rpc_message('set_worker_processes', n=n)

    @rpc_method()
    def is_paused(self):
        return {'paused': self._paused}

    @rpc_method()
    def pause(self):
        self._paused = True

    @rpc_method()
    def unpause(self):
        self._paused = False

    @rpc_method()
    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)

    @rpc_method()
    def update_resource(self, resource, amount):
        if not isinstance(amount, int) or amount < 0:
            return False
        self._resources[resource] = amount
        return True

    def _generate_retry_policy(self, task_retry_policy_dict):
        retry_policy_dict = self._config._get_retry_policy()._asdict()
        retry_policy_dict.update({k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None})
        return RetryPolicy(**retry_policy_dict)

    def _has_resources(self, needed_resources, used_resources):
        if needed_resources is None:
            return True

        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] >
available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks_by_status(RUNNING): if getattr(task, 'resources_running', task.resources): for resource, amount in six.iteritems(getattr(task, 'resources_running', task.resources)): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _reset_orphaned_batch_running_tasks(self, worker_id): running_batch_ids = { task.batch_id for task in self._state.get_active_tasks_by_status(RUNNING) if task.worker_running == worker_id } orphaned_tasks = [ task for task in self._state.get_active_tasks_by_status(BATCH_RUNNING) if task.worker_running == worker_id and task.batch_id not in running_batch_ids ] for task in orphaned_tasks: self._state.set_status(task, PENDING) @rpc_method() def count_pending(self, worker): worker_id, worker = worker, self._state.get_worker(worker) num_pending, num_unique_pending, num_pending_last_scheduled = 0, 0, 0 running_tasks = [] upstream_status_table = {} for task in worker.get_tasks(self._state, RUNNING): if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED: continue # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) if other_worker is not None: more_info = {'task_id': task.id, 'worker': str(other_worker)} more_info.update(other_worker.info) running_tasks.append(more_info) for task in worker.get_tasks(self._state, PENDING, FAILED): if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED: continue num_pending += 1 num_unique_pending += int(len(task.workers) == 1) num_pending_last_scheduled += int(task.workers.peek(last=True) == worker_id) return { 'n_pending_tasks': num_pending, 'n_unique_pending': num_unique_pending, 'n_pending_last_scheduled': num_pending_last_scheduled, 'worker_state': worker.state, 'running_tasks': running_tasks, } @rpc_method(allow_null=False) def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
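        # Illustrative example of the greedy bookkeeping: with a single 'gpu'
        # in total, a higher-priority task destined for another worker reserves
        # the gpu in greedy_resources first (tasks are visited in decreasing
        # priority order), so a lower-priority task of this worker that also
        # needs the gpu is skipped even while the gpu is currently free.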
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() assert worker is not None worker_id = worker worker = self._update_worker( worker_id, worker_reference={'host': host}, get_work=True) if not worker.enabled: reply = {'n_pending_tasks': 0, 'running_tasks': [], 'task_id': None, 'n_unique_pending': 0, 'worker_state': worker.state, } return reply if assistant: self.add_worker(worker_id, [('assistant', assistant)]) batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1 best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_active_tasks_by_status(RUNNING), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task if current_tasks is not None: # batch running tasks that weren't claimed since the last get_work go back in the pool self._reset_orphaned_batch_running_tasks(worker_id) greedy_resources = collections.defaultdict(int) worker = self._state.get_worker(worker_id) if self._paused: relevant_tasks = [] elif worker.is_trivial_worker(self._state): relevant_tasks = worker.get_tasks(self._state, PENDING, RUNNING) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_active_tasks_by_status(PENDING, RUNNING) used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: if (best_task and batched_params and task.family == best_task.family and len(batched_tasks) < max_batch_size and task.is_batchable() and all( task.params.get(name) == value for name, value in unbatched_params.items()) and task.resources == best_task.resources and self._schedulable(task)): for name, params in batched_params.items(): params.append(task.params.get(name)) batched_tasks.append(task) if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((getattr(task, 'resources_running', task.resources) or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): in_workers = (assistant and task.runnable) or worker_id in task.workers if in_workers and self._has_resources(task.resources, used_resources): best_task = task batch_param_names, max_batch_size = self._state.get_batcher( worker_id, task.family) if batch_param_names and task.is_batchable(): try: batched_params = { name: [task.params[name]] for name in batch_param_names } unbatched_params = { name: value for name, value in task.params.items() if name not in batched_params } batched_tasks.append(task) except KeyError: batched_params, unbatched_params = None, None else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = 
self.count_pending(worker_id) if len(batched_tasks) > 1: batch_string = '|'.join(task.id for task in batched_tasks) batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest() for task in batched_tasks: self._state.set_batch_running(task, batch_id, worker_id) combined_params = best_task.params.copy() combined_params.update(batched_params) reply['task_id'] = None reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = combined_params reply['batch_id'] = batch_id reply['batch_task_ids'] = [task.id for task in batched_tasks] elif best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.resources_running = best_task.resources best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params else: reply['task_id'] = None return reply @rpc_method(attempts=1) def ping(self, **kwargs): worker_id = kwargs['worker'] worker = self._update_worker(worker_id) return {"rpc_messages": worker.fetch_rpc_messages()} def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements status = max((upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps), key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': getattr(task, "status_message", None) } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret @rpc_method() def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes 
limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized @rpc_method() def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) @rpc_method() def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) @rpc_method() def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) tasks = self._state.get_active_tasks_by_status(status) if status else self._state.get_active_tasks() for task in filter(filter_func, tasks): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, include_deps=False) result[task.id] = serialized if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks): return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id @rpc_method() def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=worker.started, state=worker.state, first_task_display_name=self._first_task_display_name(worker), num_unread_rpc_messages=len(worker.rpc_messages), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) for task in self._state.get_active_tasks_by_status(RUNNING): if task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, include_deps=False) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_active_tasks_by_status(PENDING): for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers @rpc_method() def resource_list(self): """ Resources usage info and their consumers (tasks). """ self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_active_tasks_by_status(RUNNING): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, include_deps=False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret @rpc_method() def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, include_deps=False) result[task.status][task.id] = serialized return result @rpc_method() def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized @rpc_method() def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} @rpc_method() def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message if task.status == RUNNING and task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.status_message = status_message @rpc_method() def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. 
""" import collections import inspect import json from luigi.batch_notifier import BatchNotifier try: import cPickle as pickle except ImportError: import pickle import functools import hashlib import itertools import logging import os import re import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \ BATCH_RUNNING from luigi.task import Config logger = logging.getLogger(__name__) UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, BATCH_RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } WORKER_STATE_DISABLED = 'disabled' WORKER_STATE_ACTIVE = 'active' TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]') RPC_METHODS = {} _retry_policy_fields = [ "retry_count", "disable_hard_timeout", "disable_window", ] RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields) def _get_empty_retry_policy(): return RetryPolicy(*[None] * len(_retry_policy_fields)) def rpc_method(**request_args): def _rpc_method(fn): # If request args are passed, return this function again for use as # the decorator function with the request args attached. fn_args = inspect.getargspec(fn) assert not fn_args.varargs assert fn_args.args[0] == 'self' all_args = fn_args.args[1:] defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ()))) required_args = frozenset(arg for arg in all_args if arg not in defaults) fn_name = fn.__name__ @functools.wraps(fn) def rpc_func(self, *args, **kwargs): actual_args = defaults.copy() actual_args.update(dict(zip(all_args, args))) actual_args.update(kwargs) if not all(arg in actual_args for arg in required_args): raise TypeError('{} takes {} arguments ({} given)'.format( fn_name, len(all_args), len(actual_args))) return self._request('/api/{}'.format(fn_name), actual_args, **request_args) RPC_METHODS[fn_name] = rpc_func return fn return _rpc_method class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. We # should drop the compatibility at some point retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') batch_emails = parameter.BoolParameter(default=False, description="Send e-mails in batches rather than immediately") # Jobs are disabled if we see more than retry_count failures in disable_window seconds. # These disables last for disable_persist seconds. 
disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) retry_count = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable_failures')) disable_hard_timeout = parameter.IntParameter(default=999999999, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def _get_retry_policy(self): return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window) class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. 
""" self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class OrderedSet(collections.MutableSet): """ Standard Python OrderedSet recipe found at http://code.activestate.com/recipes/576694/ Modified to include a peek function to get the last element """ def __init__(self, iterable=None): self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable def __len__(self): return len(self.map) def __contains__(self, key): return key in self.map def add(self, key): if key not in self.map: end = self.end curr = end[1] curr[2] = end[1] = self.map[key] = [key, curr, end] def discard(self, key): if key in self.map: key, prev, next = self.map.pop(key) prev[2] = next next[1] = prev def __iter__(self): end = self.end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def peek(self, last=True): if not self: raise KeyError('set is empty') key = self.end[1][0] if last else self.end[2][0] return key def pop(self, last=True): key = self.peek(last) self.discard(key) return key def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): if isinstance(other, OrderedSet): return len(self) == len(other) and list(self) == list(other) return set(self) == set(other) class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, tracking_url=None, status_message=None, retry_policy='notoptional'): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = OrderedSet() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.updated = self.time self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.retry_policy = retry_policy self.failures = Failures(self.retry_policy.disable_window) self.tracking_url = tracking_url self.status_message = status_message self.scheduler_disable_time = None self.runnable = False self.batchable = False self.batch_id = None def __repr__(self): return "Task(%r)" % vars(self) # TODO(2017-08-10) replace this function with direct calls to batchable # this only exists for backward compatibility def is_batchable(self): try: return self.batchable except AttributeError: return False def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if self.failures.first_failure_time is not None: if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout): return True logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count) if self.failures.num_failures() >= self.retry_policy.retry_count: logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count) return True return False @property def pretty_id(self): param_str = ', '.join('{}={}'.format(key, value) for key, value in sorted(self.params.items())) return '{}({})'.format(self.family, param_str) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} self.disabled = False self.rpc_messages = [] def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_tasks(self, state, *statuses): num_self_tasks = len(self.tasks) num_state_tasks = sum(len(state._status_tasks[status]) for status in statuses) if num_self_tasks < num_state_tasks: return six.moves.filter(lambda task: task.status in statuses, self.tasks) else: return six.moves.filter(lambda task: self.id in task.workers, state.get_active_tasks_by_status(*statuses)) def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. 
""" if self.assistant: return False return all(not task.resources for task in self.get_tasks(state, PENDING)) @property def assistant(self): return self.info.get('assistant', False) @property def enabled(self): return not self.disabled @property def state(self): if self.enabled: return WORKER_STATE_ACTIVE else: return WORKER_STATE_DISABLED def add_rpc_message(self, name, **kwargs): # the message has the format {'name': <function_name>, 'kwargs': <function_kwargs>} self.rpc_messages.append({'name': name, 'kwargs': kwargs}) def fetch_rpc_messages(self): messages = self.rpc_messages[:] del self.rpc_messages[:] return messages def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object self._task_batchers = {} def get_state(self): return self._tasks, self._active_workers, self._task_batchers def set_state(self, state): self._tasks, self._active_workers = state[:2] if len(state) >= 3: self._task_batchers = state[2] def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from empty state.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task else: logger.info("No prior state file exists at %s. Starting with empty state", self._state_path) def get_active_tasks(self): return six.itervalues(self._tasks) def get_active_tasks_by_status(self, *statuses): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in statuses) def get_batch_running_tasks(self, batch_id): assert batch_id is not None return [ task for task in self.get_active_tasks_by_status(BATCH_RUNNING) if task.batch_id == batch_id ] def set_batcher(self, worker_id, family, batcher_args, max_batch_size): self._task_batchers.setdefault(worker_id, {}) self._task_batchers[worker_id][family] = (batcher_args, max_batch_size) def get_batcher(self, worker_id, family): return self._task_batchers.get(worker_id, {}).get(family, (None, 1)) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_batch_running(self, task, batch_id, worker_id): self.set_status(task, BATCH_RUNNING) task.batch_id = batch_id task.worker_running = worker_id task.resources_running = task.resources task.time_running = time.time() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING): return remove_on_failure = task.batch_id is not None and not task.batchable if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if task.status == RUNNING and task.batch_id is not None and new_status != RUNNING: for batch_task in self.get_batch_running_tasks(task.batch_id): self.set_status(batch_task, new_status, config) batch_task.batch_id = None task.batch_id = None if new_status == FAILED and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED if not config.batch_emails: notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=task.retry_policy.retry_count, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None if new_status != task.status: self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status task.updated = time.time() if new_status == FAILED: task.retry = time.time() + config.retry_delay if remove_on_failure: task.remove = time.time() def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status in (BATCH_RUNNING, RUNNING) and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def update_status(self, task, config): # Mark tasks with no remaining active stakeholders for deletion if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING): # We don't check for the RUNNING case, because that is already handled # by the fail_dead_worker_task function. 
logger.debug("Task %r has no stakeholders anymore -> might remove " "task in %s seconds", task.id, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - task.scheduler_disable_time > config.disable_persist: self.re_enable(task, config) # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) def may_prune(self, task): return task.remove and time.time() >= task.remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = worker.last_get_work if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) self._remove_workers_from_tasks(delete_workers) def _remove_workers_from_tasks(self, workers, remove_stakeholders=True): for task in self.get_active_tasks(): if remove_stakeholders: task.stakeholders.difference_update(workers) task.workers -= workers def disable_workers(self, worker_ids): self._remove_workers_from_tasks(worker_ids, remove_stakeholders=False) for worker_id in worker_ids: self.get_worker(worker_id).disabled = True class Scheduler(object): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy()) self._worker_requests = {} self._paused = False if self._config.batch_emails: self._email_batcher = BatchNotifier() def load(self): self._state.load() def dump(self): self._state.dump() if self._config.batch_emails: self._email_batcher.send_email() @rpc_method() def prune(self): logger.info("Starting pruning of task graph") self._prune_workers() self._prune_tasks() self._prune_emails() logger.info("Done pruning task graph") def _prune_workers(self): remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) def _prune_tasks(self): assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) self._state.update_status(task, self._config) if self._state.may_prune(task): logger.info("Removing task %r", task.id) remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) def _prune_emails(self): if self._config.batch_emails: self._email_batcher.update() def _update_worker(self, worker_id, worker_reference=None, get_work=False): # Keep track of whenever the worker was last active. # For convenience also return the worker object. worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) return worker def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. 
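        For example, if task B depends on A and B is (re)scheduled with
        priority 10 while A has priority 0, A's priority is raised to 10 as
        well, recursively through A's own dependencies.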
""" task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) @rpc_method() def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')): self._state.set_batcher(worker, task_family, batched_args, max_batch_size) @rpc_method() def forgive_failures(self, task_id=None): status = PENDING task = self._state.get_task(task_id) if task is None: return {"task_id": task_id, "status": None} # we forgive only failures if task.status == FAILED: # forgive but do not forget self._update_task_history(task, status) self._state.set_status(task, status, self._config) return {"task_id": task_id, "status": task.status} @rpc_method() def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, worker=None, batchable=None, batch_id=None, retry_policy_dict={}, owners=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ assert worker is not None worker_id = worker worker = self._update_worker(worker_id) retry_policy = self._generate_retry_policy(retry_policy_dict) if worker.enabled: _default_task = self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params, ) else: _default_task = None task = self._state.get_task(task_id, setdefault=_default_task) if task is None or (task.status != RUNNING and not worker.enabled): return # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if batch_id is not None: task.batch_id = batch_id if status == RUNNING and not task.worker_running: task.worker_running = worker_id if batch_id: task.resources_running = self._state.get_batch_running_tasks(batch_id)[0].resources_running task.time_running = time.time() if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.tracking_url = tracking_url if batchable is not None: task.batchable = batchable if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.expl = expl if not (task.status in (RUNNING, BATCH_RUNNING) and (status not in (DONE, FAILED, RUNNING) or task.worker_running != worker_id)) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed on the worker actually running it if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED and self._config.batch_emails: batched_params, _ = self._state.get_batcher(worker_id, family) if batched_params: unbatched_params = { param: value for param, value in six.iteritems(task.params) if param not in batched_params } else: unbatched_params = task.params try: expl_raw = json.loads(expl) except ValueError: expl_raw = expl self._email_batcher.add_failure( task.pretty_id, task.family, unbatched_params, expl_raw, owners) if task.status == DISABLED: self._email_batcher.add_disable( task.pretty_id, task.family, unbatched_params, owners) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if worker.enabled and not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) # Because some tasks (non-dynamic dependencies) are `_make_task`ed # before we know their retry_policy, we always set it here task.retry_policy = retry_policy if runnable and status != FAILED and worker.enabled: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable @rpc_method() def announce_scheduling_failure(self, task_name, family, params, expl, owners, **kwargs): if not self._config.batch_emails: return worker_id = kwargs['worker'] batched_params, _ = self._state.get_batcher(worker_id, family) if batched_params: unbatched_params = { param: value for param, value in six.iteritems(params) if param not in batched_params } else: unbatched_params = params self._email_batcher.add_scheduling_fail(task_name, family, unbatched_params, expl, owners) @rpc_method() def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) @rpc_method() def disable_worker(self, worker): self._state.disable_workers({worker}) @rpc_method() def set_worker_processes(self, worker, n): self._state.get_worker(worker).add_rpc_message('set_worker_processes', n=n) @rpc_method() def is_paused(self): return {'paused': self._paused} @rpc_method() def pause(self): self._paused = True @rpc_method() def unpause(self): self._paused = False @rpc_method() def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) @rpc_method() def update_resource(self, resource, amount): if not isinstance(amount, int) or amount < 0: return False self._resources[resource] = amount return True def _generate_retry_policy(self, task_retry_policy_dict): retry_policy_dict = self._config._get_retry_policy()._asdict() retry_policy_dict.update({k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None}) return RetryPolicy(**retry_policy_dict) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > 
available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks_by_status(RUNNING): if getattr(task, 'resources_running', task.resources): for resource, amount in six.iteritems(getattr(task, 'resources_running', task.resources)): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _reset_orphaned_batch_running_tasks(self, worker_id): running_batch_ids = { task.batch_id for task in self._state.get_active_tasks_by_status(RUNNING) if task.worker_running == worker_id } orphaned_tasks = [ task for task in self._state.get_active_tasks_by_status(BATCH_RUNNING) if task.worker_running == worker_id and task.batch_id not in running_batch_ids ] for task in orphaned_tasks: self._state.set_status(task, PENDING) @rpc_method() def count_pending(self, worker): worker_id, worker = worker, self._state.get_worker(worker) num_pending, num_unique_pending, num_pending_last_scheduled = 0, 0, 0 running_tasks = [] upstream_status_table = {} for task in worker.get_tasks(self._state, RUNNING): if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED: continue # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) if other_worker is not None: more_info = {'task_id': task.id, 'worker': str(other_worker)} more_info.update(other_worker.info) running_tasks.append(more_info) for task in worker.get_tasks(self._state, PENDING, FAILED): if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED: continue num_pending += 1 num_unique_pending += int(len(task.workers) == 1) num_pending_last_scheduled += int(task.workers.peek(last=True) == worker_id) return { 'n_pending_tasks': num_pending, 'n_unique_pending': num_unique_pending, 'n_pending_last_scheduled': num_pending_last_scheduled, 'worker_state': worker.state, 'running_tasks': running_tasks, } @rpc_method(allow_null=False) def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
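        # Illustrative example of why both checks are needed (values made up):
        # say resource "db" has one free unit, and a higher-priority PENDING
        # task belonging to another worker needs it.  The greedy pass below
        # reserves the unit for that worker, so a lower-priority "db" task
        # owned by the requesting worker fails the greedy check and is not
        # handed out, avoiding priority inversion on the shared resource.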
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() assert worker is not None worker_id = worker worker = self._update_worker( worker_id, worker_reference={'host': host}, get_work=True) if not worker.enabled: reply = {'n_pending_tasks': 0, 'running_tasks': [], 'task_id': None, 'n_unique_pending': 0, 'worker_state': worker.state, } return reply if assistant: self.add_worker(worker_id, [('assistant', assistant)]) batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1 best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_active_tasks_by_status(RUNNING), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task if current_tasks is not None: # batch running tasks that weren't claimed since the last get_work go back in the pool self._reset_orphaned_batch_running_tasks(worker_id) greedy_resources = collections.defaultdict(int) worker = self._state.get_worker(worker_id) if self._paused: relevant_tasks = [] elif worker.is_trivial_worker(self._state): relevant_tasks = worker.get_tasks(self._state, PENDING, RUNNING) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_active_tasks_by_status(PENDING, RUNNING) used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: if (best_task and batched_params and task.family == best_task.family and len(batched_tasks) < max_batch_size and task.is_batchable() and all( task.params.get(name) == value for name, value in unbatched_params.items()) and task.resources == best_task.resources and self._schedulable(task)): for name, params in batched_params.items(): params.append(task.params.get(name)) batched_tasks.append(task) if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((getattr(task, 'resources_running', task.resources) or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): in_workers = (assistant and task.runnable) or worker_id in task.workers if in_workers and self._has_resources(task.resources, used_resources): best_task = task batch_param_names, max_batch_size = self._state.get_batcher( worker_id, task.family) if batch_param_names and task.is_batchable(): try: batched_params = { name: [task.params[name]] for name in batch_param_names } unbatched_params = { name: value for name, value in task.params.items() if name not in batched_params } batched_tasks.append(task) except KeyError: batched_params, unbatched_params = None, None else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = 
self.count_pending(worker_id) if len(batched_tasks) > 1: batch_string = '|'.join(task.id for task in batched_tasks) batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest() for task in batched_tasks: self._state.set_batch_running(task, batch_id, worker_id) combined_params = best_task.params.copy() combined_params.update(batched_params) reply['task_id'] = None reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = combined_params reply['batch_id'] = batch_id reply['batch_task_ids'] = [task.id for task in batched_tasks] elif best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.resources_running = best_task.resources best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params else: reply['task_id'] = None return reply @rpc_method(attempts=1) def ping(self, **kwargs): worker_id = kwargs['worker'] worker = self._update_worker(worker_id) return {"rpc_messages": worker.fetch_rpc_messages()} def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() dep = self._state.get_task(dep_id) if dep: if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack += [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements status = max((upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps), key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'display_name': task.pretty_id, 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'last_updated': getattr(task, "updated", task.time), 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), 'status_message': getattr(task, "status_message", None) } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret @rpc_method() def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _filter_done(self, task_ids): for task_id in task_ids: task = self._state.get_task(task_id) if task is None or task.status != DONE: yield task_id def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes 
limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized @rpc_method() def dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id, include_done=include_done) @rpc_method() def inverse_dep_graph(self, task_id, include_done=True, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph( task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done) @rpc_method() def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None, **kwargs): """ Query for a subset of tasks by status. 
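        Optionally narrows the result by upstream status (applied to PENDING
        tasks only) and by space-separated search terms that must all occur in
        a task's pretty_id.  If the result would exceed the shown-task limit,
        only {'num_tasks': <count>} is returned.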
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) tasks = self._state.get_active_tasks_by_status(status) if status else self._state.get_active_tasks() for task in filter(filter_func, tasks): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, include_deps=False) result[task.id] = serialized if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks): return {'num_tasks': len(result)} return result def _first_task_display_name(self, worker): task_id = worker.info.get('first_task', '') if self._state.has_task(task_id): return self._state.get_task(task_id).pretty_id else: return task_id @rpc_method() def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=worker.started, state=worker.state, first_task_display_name=self._first_task_display_name(worker), num_unread_rpc_messages=len(worker.rpc_messages), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) for task in self._state.get_active_tasks_by_status(RUNNING): if task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, include_deps=False) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_active_tasks_by_status(PENDING): for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers @rpc_method() def resource_list(self): """ Resources usage info and their consumers (tasks). """ self.prune() resources = [ dict( name=resource, num_total=r_dict['total'], num_used=r_dict['used'] ) for resource, r_dict in six.iteritems(self.resources())] if self._resources is not None: consumers = collections.defaultdict(dict) for task in self._state.get_active_tasks_by_status(RUNNING): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): consumers[resource][task.id] = self._serialize_task(task.id, include_deps=False) for resource in resources: tasks = consumers[resource['name']] resource['num_consumer'] = len(tasks) resource['running'] = tasks return resources def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret @rpc_method() def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, include_deps=False) result[task.status][task.id] = serialized return result @rpc_method() def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized @rpc_method() def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id} else: return {"taskId": task_id, "error": ""} @rpc_method() def set_task_status_message(self, task_id, status_message): if self._state.has_task(task_id): task = self._state.get_task(task_id) task.status_message = status_message if task.status == RUNNING and task.batch_id is not None: for batch_task in self._state.get_batch_running_tasks(task.batch_id): batch_task.status_message = status_message @rpc_method() def get_task_status_message(self, task_id): if self._state.has_task(task_id): task = self._state.get_task(task_id) return {"taskId": task_id, "statusMessage": task.status_message} else: return {"taskId": task_id, "statusMessage": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
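# --- Illustrative usage sketch, not part of scheduler.py itself: a minimal
# in-process round trip through the RPC surface defined above.  The task id,
# family, params and worker name are invented for the example, and default
# config values (enabled worker, no batchers registered) are assumed.
if __name__ == '__main__':
    sched = Scheduler(retry_delay=1.0)
    sched.add_worker('worker-1', [('workers', 1)])
    sched.add_task(worker='worker-1', task_id='Demo(x=1)', family='Demo',
                   params={'x': '1'}, priority=5)
    # The worker asks for work and should be handed the pending task back.
    work = sched.get_work(worker='worker-1', host='localhost')
    assert work['task_id'] == 'Demo(x=1)'
    # Report completion; the scheduler marks the task DONE.
    sched.add_task(worker='worker-1', task_id='Demo(x=1)', status=DONE)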
BugsInPy/BugsInPy/temp/projects/luigi/bug-7-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-7-buggy/luigi/luigi/scheduler.py
luigi-bug-18
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. 
Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker or Task class, this # code needs to be updated # Compatibility since 2014-06-02 for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) # Compatibility since 2015-05-28 if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. 
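                # The rebuild below is pure bookkeeping: every task already
                # records the ids of the workers that may run it, so the
                # reverse mapping (worker -> task objects) can be recomputed
                # from scratch after unpickling an old-format state file.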
for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) # Compatibility since 2015-04-28 if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)): for t in six.itervalues(self._tasks): t.disable_hard_timeout = None else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). """ return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def prune(self, task, config): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == 
DISABLED and task.scheduler_disable_time is not None: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
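        # Every Task goes through this factory so the disable policy is applied
        # uniformly.  For example (hypothetical config; the defaults leave the
        # policy off): with disable_failures=3 and disable_window=3600, a task
        # that fails three times within an hour is moved to DISABLED for
        # disable_persist seconds.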
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) if task.id not in necessary_tasks and self._state.prune(task, self._config): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. 
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. 
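        # Note on the greedy bookkeeping below: each active worker contributes
        # info.get('workers', 1) slots to greedy_workers.  A slot is consumed
        # either by a task the worker is already RUNNING or by a pending task
        # reserved for it during the greedy pass, so the simulation never plans
        # more concurrent tasks per worker than it advertises.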
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if 
self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """ add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatibility.
We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ Return True if this worker is not an assistant and none of its pending tasks require resources. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistence. The point of this class is to enable other ways to keep state, e.g. by using a database. These will be implemented by creating an abstract base class that this and other classes inherit from.
""" def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker or Task class, this # code needs to be updated # Compatibility since 2014-06-02 for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) # Compatibility since 2015-05-28 if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) # Compatibility since 2015-04-28 if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)): for t in six.itervalues(self._tasks): t.disable_hard_timeout = None else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). 
""" return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def prune(self, task, config): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. 
That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which case the global instance will be used) :param resources: a dict of str->int constraints :param task_history_impl: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) if task.id not in necessary_tasks and self._state.prune(task, self._config): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of when the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was an actual change, to prevent noise.
# We also check for status == PENDING b/c that's the default value # (so checking only for status != task.status would be misleading) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node with no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if 
self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
BugsInPy/BugsInPy/temp/projects/luigi/bug-18-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-18-buggy/luigi/luigi/scheduler.py
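# Illustrative sketch (not from the luigi sources above): the Failures class
# in the scheduler entry implements a sliding-window failure counter;
# add_failure() records a timestamp and num_failures() prunes entries older
# than `window` seconds before counting. The same idiom reduced to a
# standalone function with explicit timestamps instead of time.time():

import collections

def sliding_window_count(events, now, window):
    """Count events (a deque of ascending timestamps) inside [now - window, now]."""
    min_time = now - window
    while events and events[0] < min_time:
        events.popleft()  # drop timestamps that fell out of the window
    return len(events)

events = collections.deque([0.0, 10.0, 3500.0])
assert sliding_window_count(events, now=3600.0, window=3600.0) == 3  # all inside
assert sliding_window_count(events, now=7100.0, window=3600.0) == 1  # only 3500.0 remains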
luigi-bug-13
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ :class:`LocalTarget` provides a concrete implementation of a :py:class:`~luigi.target.Target` class that uses files on the local file system """ import os import random import shutil import tempfile import io import warnings from luigi.format import FileWrapper, get_default_format from luigi.target import FileAlreadyExists, MissingParentDirectory, NotADirectory, FileSystem, FileSystemTarget, AtomicLocalFile class atomic_file(AtomicLocalFile): """Simple class that writes to a temp file and moves it on close() Also cleans up the temp file if close is not invoked """ def move_to_final_destination(self): os.rename(self.tmp_path, self.path) def generate_tmp_path(self, path): return path + '-luigi-tmp-%09d' % random.randrange(0, 1e10) class LocalFileSystem(FileSystem): """ Wrapper for access to file system operations. Work in progress - add things as needed. """ def exists(self, path): return os.path.exists(path) def mkdir(self, path, parents=True, raise_if_exists=False): if self.exists(path): if raise_if_exists: raise FileAlreadyExists() elif not self.isdir(path): raise NotADirectory() else: return if parents: os.makedirs(path) else: if not os.path.exists(os.path.dirname(path)): raise MissingParentDirectory() os.mkdir(path) def isdir(self, path): return os.path.isdir(path) def listdir(self, path): for dir_, _, files in os.walk(path): assert dir_.startswith(path) for name in files: yield os.path.join(dir_, name) def remove(self, path, recursive=True): if recursive and self.isdir(path): shutil.rmtree(path) else: os.remove(path) def move(self, old_path, new_path, raise_if_exists=False): if raise_if_exists and os.path.exists(new_path): raise RuntimeError('Destination exists: %s' % new_path) d = os.path.dirname(new_path) if d and not os.path.exists(d): self.fs.mkdir(d) os.rename(old_path, new_path) class LocalTarget(FileSystemTarget): fs = LocalFileSystem() def __init__(self, path=None, format=None, is_tmp=False): if format is None: format = get_default_format() if not path: if not is_tmp: raise Exception('path or is_tmp must be set') path = os.path.join(tempfile.gettempdir(), 'luigi-tmp-%09d' % random.randint(0, 999999999)) super(LocalTarget, self).__init__(path) self.format = format self.is_tmp = is_tmp def makedirs(self): """ Create all parent folders if they do not exist. 
""" normpath = os.path.normpath(self.path) parentfolder = os.path.dirname(normpath) if parentfolder: try: os.makedirs(parentfolder) except OSError: pass def open(self, mode='r'): rwmode = mode.replace('b', '').replace('t', '') if rwmode == 'w': self.makedirs() return self.format.pipe_writer(atomic_file(self.path)) elif rwmode == 'r': fileobj = FileWrapper(io.BufferedReader(io.FileIO(self.path, mode))) return self.format.pipe_reader(fileobj) else: raise Exception('mode must be r/w (got:%s)' % mode) def move(self, new_path, raise_if_exists=False): self.fs.move(self.path, new_path, raise_if_exists=raise_if_exists) def move_dir(self, new_path): self.move(new_path) def remove(self): self.fs.remove(self.path) def copy(self, new_path, raise_if_exists=False): if raise_if_exists and os.path.exists(new_path): raise RuntimeError('Destination exists: %s' % new_path) tmp = LocalTarget(new_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10), is_tmp=True) tmp.makedirs() shutil.copy(self.path, tmp.fn) tmp.move(new_path) @property def fn(self): return self.path def __del__(self): if self.is_tmp and self.exists(): self.remove() class File(LocalTarget): def __init__(self, *args, **kwargs): warnings.warn("File has been renamed LocalTarget", DeprecationWarning, stacklevel=2) super(File, self).__init__(*args, **kwargs) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ :class:`LocalTarget` provides a concrete implementation of a :py:class:`~luigi.target.Target` class that uses files on the local file system """ import os import random import shutil import tempfile import io import warnings from luigi.format import FileWrapper, get_default_format from luigi.target import FileAlreadyExists, MissingParentDirectory, NotADirectory, FileSystem, FileSystemTarget, AtomicLocalFile class atomic_file(AtomicLocalFile): """Simple class that writes to a temp file and moves it on close() Also cleans up the temp file if close is not invoked """ def move_to_final_destination(self): os.rename(self.tmp_path, self.path) def generate_tmp_path(self, path): return path + '-luigi-tmp-%09d' % random.randrange(0, 1e10) class LocalFileSystem(FileSystem): """ Wrapper for access to file system operations. Work in progress - add things as needed. 
""" def exists(self, path): return os.path.exists(path) def mkdir(self, path, parents=True, raise_if_exists=False): if self.exists(path): if raise_if_exists: raise FileAlreadyExists() elif not self.isdir(path): raise NotADirectory() else: return if parents: os.makedirs(path) else: if not os.path.exists(os.path.dirname(path)): raise MissingParentDirectory() os.mkdir(path) def isdir(self, path): return os.path.isdir(path) def listdir(self, path): for dir_, _, files in os.walk(path): assert dir_.startswith(path) for name in files: yield os.path.join(dir_, name) def remove(self, path, recursive=True): if recursive and self.isdir(path): shutil.rmtree(path) else: os.remove(path) def move(self, old_path, new_path, raise_if_exists=False): if raise_if_exists and os.path.exists(new_path): raise RuntimeError('Destination exists: %s' % new_path) d = os.path.dirname(new_path) if d and not os.path.exists(d): self.mkdir(d) os.rename(old_path, new_path) class LocalTarget(FileSystemTarget): fs = LocalFileSystem() def __init__(self, path=None, format=None, is_tmp=False): if format is None: format = get_default_format() if not path: if not is_tmp: raise Exception('path or is_tmp must be set') path = os.path.join(tempfile.gettempdir(), 'luigi-tmp-%09d' % random.randint(0, 999999999)) super(LocalTarget, self).__init__(path) self.format = format self.is_tmp = is_tmp def makedirs(self): """ Create all parent folders if they do not exist. """ normpath = os.path.normpath(self.path) parentfolder = os.path.dirname(normpath) if parentfolder: try: os.makedirs(parentfolder) except OSError: pass def open(self, mode='r'): rwmode = mode.replace('b', '').replace('t', '') if rwmode == 'w': self.makedirs() return self.format.pipe_writer(atomic_file(self.path)) elif rwmode == 'r': fileobj = FileWrapper(io.BufferedReader(io.FileIO(self.path, mode))) return self.format.pipe_reader(fileobj) else: raise Exception('mode must be r/w (got:%s)' % mode) def move(self, new_path, raise_if_exists=False): self.fs.move(self.path, new_path, raise_if_exists=raise_if_exists) def move_dir(self, new_path): self.move(new_path) def remove(self): self.fs.remove(self.path) def copy(self, new_path, raise_if_exists=False): if raise_if_exists and os.path.exists(new_path): raise RuntimeError('Destination exists: %s' % new_path) tmp = LocalTarget(new_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10), is_tmp=True) tmp.makedirs() shutil.copy(self.path, tmp.fn) tmp.move(new_path) @property def fn(self): return self.path def __del__(self): if self.is_tmp and self.exists(): self.remove() class File(LocalTarget): def __init__(self, *args, **kwargs): warnings.warn("File has been renamed LocalTarget", DeprecationWarning, stacklevel=2) super(File, self).__init__(*args, **kwargs)
BugsInPy/BugsInPy/temp/projects/luigi/bug-13-fixed/luigi/luigi/file.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-13-buggy/luigi/luigi/file.py
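# Illustrative sketch (not from the luigi sources above): atomic_file in the
# entry above writes to "<path>-luigi-tmp-<random>" and renames the temp file
# into place on close(), so readers never observe a half-written target.
# The write-then-rename pattern in miniature; the path is hypothetical:

import os
import random
import tempfile

path = os.path.join(tempfile.gettempdir(), 'example-target.txt')
tmp_path = path + '-luigi-tmp-%09d' % random.randrange(0, int(1e10))

with open(tmp_path, 'w') as f:
    f.write('partial output is only ever visible at tmp_path\n')
os.rename(tmp_path, path)  # atomic on POSIX when both paths share a filesystem
assert os.path.exists(path)
os.remove(path)  # clean up the file this sketch created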
luigi-bug-20
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The abstract :py:class:`Task` class. It is a central concept of Luigi and represents the state of the workflow. See :doc:`/tasks` for an overview. """ try: from itertools import imap as map except ImportError: pass import logging import traceback import warnings from luigi import six from luigi import parameter from luigi.task_register import Register, TaskClassException Parameter = parameter.Parameter logger = logging.getLogger('luigi-interface') def namespace(namespace=None): """ Call to set namespace of tasks declared after the call. If called without arguments or with ``None`` as the namespace, the namespace is reset, which is recommended to do at the end of any file where the namespace is set to avoid unintentionally setting namespace on tasks outside of the scope of the current file. The namespace of a Task can also be changed by specifying the property ``task_namespace``. This solution has the advantage that the namespace doesn't have to be restored. .. code-block:: python class Task2(luigi.Task): task_namespace = 'namespace2' """ Register._default_namespace = namespace def id_to_name_and_params(task_id): # DEPRECATED import luigi.tools.parse_task return luigi.tools.parse_task.id_to_name_and_params(task_id) class BulkCompleteNotImplementedError(NotImplementedError): """This is here to trick pylint. pylint thinks anything raising NotImplementedError needs to be implemented in any subclass. bulk_complete isn't like that. This tricks pylint into thinking that the default implementation is a valid implementation and not an abstract method.""" pass @six.add_metaclass(Register) class Task(object): """ This is the base class of all Luigi Tasks, the base unit of work in Luigi. A Luigi Task describes a unit of work. The key methods of a Task, which must be implemented in a subclass, are: * :py:meth:`run` - the computation done by this task. * :py:meth:`requires` - the list of Tasks that this Task depends on. * :py:meth:`output` - the output :py:class:`Target` that this Task creates. Parameters to the Task should be declared as members of the class, e.g.:: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() Each Task exposes a constructor accepting all :py:class:`Parameter` (and values) as kwargs. e.g. ``MyTask(count=10)`` would instantiate `MyTask`. In addition to any declared properties and methods, there are a few non-declared properties, which are created by the :py:class:`Register` metaclass: ``Task.task_namespace`` optional string which is prepended to the task name for the sake of scheduling. If it isn't overridden in a Task, whatever was last declared using `luigi.namespace` will be used. ``Task._parameters`` list of ``(parameter_name, parameter)`` tuples for this task class """ _event_callbacks = {} #: Priority of the task: the scheduler should favor available #: tasks with higher priority values first.
#: See :ref:`Task.priority` priority = 0 disabled = False #: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the #: task requires 1 unit of the scp resource. resources = {} #: Number of seconds after which to time out the run function. #: No timeout if set to 0. #: Defaults to 0 or value in luigi.cfg worker_timeout = None @property def use_cmdline_section(self): ''' Property used by core config such as `--workers` etc. These will be exposed without the class as prefix.''' return True @classmethod def event_handler(cls, event): """ Decorator for adding event handlers. """ def wrapped(callback): cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback) return callback return wrapped def trigger_event(self, event, *args, **kwargs): """ Trigger that calls all of the specified events associated with this class. """ for event_class, event_callbacks in six.iteritems(self._event_callbacks): if not isinstance(self, event_class): continue for callback in event_callbacks.get(event, []): try: # callbacks are protected callback(*args, **kwargs) except KeyboardInterrupt: return except BaseException: logger.exception("Error in event callback for %r", event) @property def task_module(self): ''' Returns what Python module to import to get access to this class. ''' # TODO(erikbern): we should think about a language-agnostic mechanism return self.__class__.__module__ @property def task_family(self): """ Convenience method since a property on the metaclass isn't directly accessible through the class instances. """ return self.__class__.task_family @classmethod def get_params(cls): """ Returns all of the Parameters for this Task. """ # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically params = [] for param_name in dir(cls): param_obj = getattr(cls, param_name) if not isinstance(param_obj, Parameter): continue params.append((param_name, param_obj)) # The order the parameters are created matters. See Parameter class params.sort(key=lambda t: t[1].counter) return params @classmethod def get_param_values(cls, params, args, kwargs): """ Get the values of the parameters from the args and kwargs. :param params: list of (param_name, Parameter). :param args: positional arguments :param kwargs: keyword arguments. :returns: list of `(name, value)` tuples, one for each parameter. """ result = {} params_dict = dict(params) task_name = cls.task_family # In case any exceptions are thrown, create a helpful description of how the Task was invoked # TODO: should we detect non-reprable arguments? 
These will lead to mysterious errors exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs) # Fill in the positional arguments positional_params = [(n, p) for n, p in params if p.positional] for i, arg in enumerate(args): if i >= len(positional_params): raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args))) param_name, param_obj = positional_params[i] result[param_name] = arg # Then the optional arguments for param_name, arg in six.iteritems(kwargs): if param_name in result: raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name)) if param_name not in params_dict: raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name)) result[param_name] = arg # Then use the defaults for anything not filled in for param_name, param_obj in params: if param_name not in result: if not param_obj.has_task_value(task_name, param_name): raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name)) result[param_name] = param_obj.task_value(task_name, param_name) def list_to_tuple(x): """ Make tuples out of lists and sets to allow hashing """ if isinstance(x, list) or isinstance(x, set): return tuple(x) else: return x # Sort it by the correct order and make a list return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params] def __init__(self, *args, **kwargs): """ Constructor to resolve values for all Parameters. For example, the Task: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() can be instantiated as ``MyTask(count=10)``. """ params = self.get_params() param_values = self.get_param_values(params, args, kwargs) # Set all values on class instance for key, value in param_values: setattr(self, key, value) # Register args and kwargs as an attribute on the class. Might be useful self.param_args = tuple(value for key, value in param_values) self.param_kwargs = dict(param_values) # Build up task id task_id_parts = [] param_objs = dict(params) for param_name, param_value in param_values: if param_objs[param_name].significant: task_id_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value))) self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts)) self.__hash = hash(self.task_id) def initialized(self): """ Returns ``True`` if the Task is initialized and ``False`` otherwise. """ return hasattr(self, 'task_id') @classmethod def from_str_params(cls, params_str=None): """ Creates an instance from a str->str hash. :param params_str: dict of param name -> value. """ if params_str is None: params_str = {} kwargs = {} for param_name, param in cls.get_params(): value = param.parse_from_input(param_name, params_str[param_name]) kwargs[param_name] = value return cls(**kwargs) def to_str_params(self): """ Convert all parameters to a str->str hash. """ params_str = {} params = dict(self.get_params()) for param_name, param_value in six.iteritems(self.param_kwargs): if params[param_name].significant: params_str[param_name] = params[param_name].serialize(param_value) return params_str def clone(self, cls=None, **kwargs): """ Creates a new instance from an existing instance where some of the args have changed. 
There are at least two scenarios where this is useful (see test/clone_test.py): * remove a lot of boilerplate when you have recursive dependencies and lots of args * there's task inheritance and some logic is on the base class :param cls: :param kwargs: :return: """ k = self.param_kwargs.copy() k.update(six.iteritems(kwargs)) if cls is None: cls = self.__class__ new_k = {} for param_name, param_class in cls.get_params(): if param_name in k: new_k[param_name] = k[param_name] return cls(**new_k) def __hash__(self): return self.__hash def __repr__(self): return self.task_id def __eq__(self, other): return self.__class__ == other.__class__ and self.param_args == other.param_args def complete(self): """ If the task has any outputs, return ``True`` if all outputs exist. Otherwise, return ``False``. However, you may freely override this method with custom logic. """ outputs = flatten(self.output()) if len(outputs) == 0: warnings.warn( "Task %r without outputs has no custom complete() method" % self, stacklevel=2 ) return False return all(map(lambda output: output.exists(), outputs)) @classmethod def bulk_complete(cls, parameter_tuples): """ Returns those of parameter_tuples for which this Task is complete. Override (with an efficient implementation) for efficient scheduling with range tools. Keep the logic consistent with that of complete(). """ raise BulkCompleteNotImplementedError() def output(self): """ The output that this Task produces. The output of the Task determines if the Task needs to be run--the task is considered finished iff the outputs all exist. Subclasses should override this method to return a single :py:class:`Target` or a list of :py:class:`Target` instances. Implementation note If running multiple workers, the output must be a resource that is accessible by all workers, such as a DFS or database. Otherwise, workers might compute the same output since they don't see the work done by other workers. See :ref:`Task.output` """ return [] # default impl def requires(self): """ The Tasks that this Task depends on. A Task will only run if all of the Tasks that it requires are completed. If your Task does not require any other Tasks, then you don't need to override this method. Otherwise, subclasses can override this method to return a single Task, a list of Task instances, or a dict whose values are Task instances. See :ref:`Task.requires` """ return [] # default impl def _requires(self): """ Override in "template" tasks which themselves are supposed to be subclassed and thus have their requires() overridden (name preserved to provide consistent end-user experience), yet need to introduce (non-input) dependencies. Must return an iterable which among others contains the _requires() of the superclass. """ return flatten(self.requires()) # base impl def process_resources(self): """ Override in "template" tasks which provide common resource functionality but allow subclasses to specify additional resources while preserving the name for consistent end-user experience. """ return self.resources # default impl def input(self): """ Returns the outputs of the Tasks returned by :py:meth:`requires` See :ref:`Task.input` :return: a list of :py:class:`Target` objects which are specified as outputs of all required Tasks. """ return getpaths(self.requires()) def deps(self): """ Internal method used by the scheduler. Returns the flattened list of requires. """ # used by scheduler return flatten(self._requires()) def run(self): """ The task run method, to be overridden in a subclass.
See :ref:`Task.run` """ pass # default impl def on_failure(self, exception): """ Override for custom error handling. This method gets called if an exception is raised in :py:meth:`run`. Return value of this method is json encoded and sent to the scheduler as the `expl` argument. Its string representation will be used as the body of the error email sent out if any. Default behavior is to return a string representation of the stack trace. """ traceback_string = traceback.format_exc() return "Runtime error:\n%s" % traceback_string def on_success(self): """ Override for doing custom completion handling for a larger class of tasks. This method gets called when :py:meth:`run` completes without raising any exceptions. The returned value is json encoded and sent to the scheduler as the `expl` argument. Default behavior is to send a None value""" pass class MixinNaiveBulkComplete(object): """ Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop. Applicable to tasks whose completeness checking is cheap. This doesn't exploit output location specific APIs for speed advantage, nevertheless removes redundant scheduler roundtrips. """ @classmethod def bulk_complete(cls, parameter_tuples): return [t for t in parameter_tuples if cls(t).complete()] def externalize(task): """ Returns an externalized version of the Task. See :py:class:`ExternalTask`. """ task.run = NotImplemented return task class ExternalTask(Task): """ Subclass for references to external dependencies. An ExternalTask does not have a `run` implementation, which signifies to the framework that this Task's :py:meth:`output` is generated outside of Luigi. """ run = NotImplemented class WrapperTask(Task): """ Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist. """ def complete(self): return all(r.complete() for r in flatten(self.requires())) class Config(Task): """Used for configuration that's not specific to a certain task. TODO: let's refactor Task & Config so that it inherits from a common ParamContainer base class """ pass def getpaths(struct): """ Maps all Tasks in a structured data object to their .output(). """ if isinstance(struct, Task): return struct.output() elif isinstance(struct, dict): r = {} for k, v in six.iteritems(struct): r[k] = getpaths(v) return r else: # Remaining case: assume struct is iterable... try: s = list(struct) except TypeError: raise Exception('Cannot map %s to Task/dict/list' % str(struct)) return [getpaths(r) for r in s] def flatten(struct): """ Creates a flat list of all items in structured output (dicts, lists, items): .. code-block:: python >>> sorted(flatten({'a': 'foo', 'b': 'bar'})) ['bar', 'foo'] >>> sorted(flatten(['foo', ['bar', 'troll']])) ['bar', 'foo', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42] """ if struct is None: return [] flat = [] if isinstance(struct, dict): for _, result in six.iteritems(struct): flat += flatten(result) return flat if isinstance(struct, six.string_types): return [struct] try: # if iterable iterator = iter(struct) except TypeError: return [struct] for result in iterator: flat += flatten(result) return flat def flatten_output(task): """ Lists all output targets by recursively walking output-less (wrapper) tasks. FIXME order consistently.
""" r = flatten(task.output()) if not r: for dep in flatten(task.requires()): r += flatten_output(dep) return r # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The abstract :py:class:`Task` class. It is a central concept of Luigi and represents the state of the workflow. See :doc:`/tasks` for an overview. """ try: from itertools import imap as map except ImportError: pass import logging import traceback import warnings from luigi import six from luigi import parameter from luigi.task_register import Register, TaskClassException Parameter = parameter.Parameter logger = logging.getLogger('luigi-interface') def namespace(namespace=None): """ Call to set namespace of tasks declared after the call. If called without arguments or with ``None`` as the namespace, the namespace is reset, which is recommended to do at the end of any file where the namespace is set to avoid unintentionally setting namespace on tasks outside of the scope of the current file. The namespace of a Task can also be changed by specifying the property ``task_namespace``. This solution has the advantage that the namespace doesn't have to be restored. .. code-block:: python class Task2(luigi.Task): task_namespace = 'namespace2' """ Register._default_namespace = namespace def id_to_name_and_params(task_id): # DEPRECATED import luigi.tools.parse_task return luigi.tools.parse_task.id_to_name_and_params(task_id) class BulkCompleteNotImplementedError(NotImplementedError): """This is here to trick pylint. pylint thinks anything raising NotImplementedError needs to be implemented in any subclass. bulk_complete isn't like that. This tricks pylint into thinking that the default implementation is a valid implementation and not an abstract method.""" pass @six.add_metaclass(Register) class Task(object): """ This is the base class of all Luigi Tasks, the base unit of work in Luigi. A Luigi Task describes a unit of work. The key methods of a Task, which must be implemented in a subclass, are: * :py:meth:`run` - the computation done by this task. * :py:meth:`requires` - the list of Tasks that this Task depends on. * :py:meth:`output` - the output :py:class:`Target` that this Task creates. Parameters to the Task should be declared as members of the class, e.g.:: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() Each Task exposes a constructor accepting all :py:class:`Parameter` (and values) as kwargs. e.g. ``MyTask(count=10)`` would instantiate `MyTask`. In addition to any declared properties and methods, there are a few non-declared properties, which are created by the :py:class:`Register` metaclass: ``Task.task_namespace`` optional string which is prepended to the task name for the sake of scheduling. If it isn't overridden in a Task, whatever was last declared using `luigi.namespace` will be used.
``Task._parameters`` list of ``(parameter_name, parameter)`` tuples for this task class """ _event_callbacks = {} #: Priority of the task: the scheduler should favor available #: tasks with higher priority values first. #: See :ref:`Task.priority` priority = 0 disabled = False #: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the #: task requires 1 unit of the scp resource. resources = {} #: Number of seconds after which to time out the run function. #: No timeout if set to 0. #: Defaults to 0 or value in luigi.cfg worker_timeout = None @property def use_cmdline_section(self): ''' Property used by core config such as `--workers` etc. These will be exposed without the class as prefix.''' return True @classmethod def event_handler(cls, event): """ Decorator for adding event handlers. """ def wrapped(callback): cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback) return callback return wrapped def trigger_event(self, event, *args, **kwargs): """ Trigger that calls all of the specified events associated with this class. """ for event_class, event_callbacks in six.iteritems(self._event_callbacks): if not isinstance(self, event_class): continue for callback in event_callbacks.get(event, []): try: # callbacks are protected callback(*args, **kwargs) except KeyboardInterrupt: return except BaseException: logger.exception("Error in event callback for %r", event) @property def task_module(self): ''' Returns what Python module to import to get access to this class. ''' # TODO(erikbern): we should think about a language-agnostic mechanism return self.__class__.__module__ @property def task_family(self): """ Convenience method since a property on the metaclass isn't directly accessible through the class instances. """ return self.__class__.task_family @classmethod def get_params(cls): """ Returns all of the Parameters for this Task. """ # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically params = [] for param_name in dir(cls): param_obj = getattr(cls, param_name) if not isinstance(param_obj, Parameter): continue params.append((param_name, param_obj)) # The order the parameters are created matters. See Parameter class params.sort(key=lambda t: t[1].counter) return params @classmethod def get_param_values(cls, params, args, kwargs): """ Get the values of the parameters from the args and kwargs. :param params: list of (param_name, Parameter). :param args: positional arguments :param kwargs: keyword arguments. :returns: list of `(name, value)` tuples, one for each parameter. """ result = {} params_dict = dict(params) task_name = cls.task_family # In case any exceptions are thrown, create a helpful description of how the Task was invoked # TODO: should we detect non-reprable arguments? 
These will lead to mysterious errors exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs) # Fill in the positional arguments positional_params = [(n, p) for n, p in params if p.positional] for i, arg in enumerate(args): if i >= len(positional_params): raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args))) param_name, param_obj = positional_params[i] result[param_name] = arg # Then the optional arguments for param_name, arg in six.iteritems(kwargs): if param_name in result: raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name)) if param_name not in params_dict: raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name)) result[param_name] = arg # Then use the defaults for anything not filled in for param_name, param_obj in params: if param_name not in result: if not param_obj.has_task_value(task_name, param_name): raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name)) result[param_name] = param_obj.task_value(task_name, param_name) def list_to_tuple(x): """ Make tuples out of lists and sets to allow hashing """ if isinstance(x, list) or isinstance(x, set): return tuple(x) else: return x # Sort it by the correct order and make a list return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params] def __init__(self, *args, **kwargs): """ Constructor to resolve values for all Parameters. For example, the Task: .. code-block:: python class MyTask(luigi.Task): count = luigi.IntParameter() can be instantiated as ``MyTask(count=10)``. """ params = self.get_params() param_values = self.get_param_values(params, args, kwargs) # Set all values on class instance for key, value in param_values: setattr(self, key, value) # Register args and kwargs as an attribute on the class. Might be useful self.param_args = tuple(value for key, value in param_values) self.param_kwargs = dict(param_values) # Build up task id task_id_parts = [] param_objs = dict(params) for param_name, param_value in param_values: if param_objs[param_name].significant: task_id_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value))) self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts)) self.__hash = hash(self.task_id) def initialized(self): """ Returns ``True`` if the Task is initialized and ``False`` otherwise. """ return hasattr(self, 'task_id') @classmethod def from_str_params(cls, params_str=None): """ Creates an instance from a str->str hash. :param params_str: dict of param name -> value. """ if params_str is None: params_str = {} kwargs = {} for param_name, param in cls.get_params(): value = param.parse_from_input(param_name, params_str[param_name]) kwargs[param_name] = value return cls(**kwargs) def to_str_params(self): """ Convert all parameters to a str->str hash. """ params_str = {} params = dict(self.get_params()) for param_name, param_value in six.iteritems(self.param_kwargs): if params[param_name].significant: params_str[param_name] = params[param_name].serialize(param_value) return params_str def clone(self, cls=None, **kwargs): """ Creates a new instance from an existing instance where some of the args have changed. 
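The following sketch, using a hypothetical ``MyTask``, shows how the machinery above resolves positional arguments and builds ``task_id`` from significant parameters only:

.. code-block:: python

    import luigi

    class MyTask(luigi.Task):
        count = luigi.IntParameter()
        name = luigi.Parameter(default='x', significant=False)

    t = MyTask(10)  # positional args bind to parameters in declaration order
    print(t.task_id)          # MyTask(count=10) -- insignificant params omitted
    print(t.to_str_params())  # {'count': '10'}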
There's at least two scenarios where this is useful (see test/clone_test.py): * remove a lot of boiler plate when you have recursive dependencies and lots of args * there's task inheritance and some logic is on the base class :param cls: :param kwargs: :return: """ k = self.param_kwargs.copy() k.update(six.iteritems(kwargs)) if cls is None: cls = self.__class__ new_k = {} for param_name, param_class in cls.get_params(): if param_name in k: new_k[param_name] = k[param_name] return cls(**new_k) def __hash__(self): return self.__hash def __repr__(self): return self.task_id def __eq__(self, other): return self.__class__ == other.__class__ and self.param_args == other.param_args def complete(self): """ If the task has any outputs, return ``True`` if all outputs exists. Otherwise, return ``False``. However, you may freely override this method with custom logic. """ outputs = flatten(self.output()) if len(outputs) == 0: warnings.warn( "Task %r without outputs has no custom complete() method" % self, stacklevel=2 ) return False return all(map(lambda output: output.exists(), outputs)) @classmethod def bulk_complete(cls, parameter_tuples): """ Returns those of parameter_tuples for which this Task is complete. Override (with an efficient implementation) for efficient scheduling with range tools. Keep the logic consistent with that of complete(). """ raise BulkCompleteNotImplementedError() def output(self): """ The output that this Task produces. The output of the Task determines if the Task needs to be run--the task is considered finished iff the outputs all exist. Subclasses should override this method to return a single :py:class:`Target` or a list of :py:class:`Target` instances. Implementation note If running multiple workers, the output must be a resource that is accessible by all workers, such as a DFS or database. Otherwise, workers might compute the same output since they don't see the work done by other workers. See :ref:`Task.output` """ return [] # default impl def requires(self): """ The Tasks that this Task depends on. A Task will only run if all of the Tasks that it requires are completed. If your Task does not require any other Tasks, then you don't need to override this method. Otherwise, a Subclasses can override this method to return a single Task, a list of Task instances, or a dict whose values are Task instances. See :ref:`Task.requires` """ return [] # default impl def _requires(self): """ Override in "template" tasks which themselves are supposed to be subclassed and thus have their requires() overridden (name preserved to provide consistent end-user experience), yet need to introduce (non-input) dependencies. Must return an iterable which among others contains the _requires() of the superclass. """ return flatten(self.requires()) # base impl def process_resources(self): """ Override in "template" tasks which provide common resource functionality but allow subclasses to specify additional resources while preserving the name for consistent end-user experience. """ return self.resources # default impl def input(self): """ Returns the outputs of the Tasks returned by :py:meth:`requires` See :ref:`Task.input` :return: a list of :py:class:`Target` objects which are specified as outputs of all required Tasks. """ return getpaths(self.requires()) def deps(self): """ Internal method used by the scheduler. Returns the flattened list of requires. """ # used by scheduler return flatten(self._requires()) def run(self): """ The task run method, to be overridden in a subclass. 
See :ref:`Task.run` """ pass # default impl def on_failure(self, exception): """ Override for custom error handling. This method gets called if an exception is raised in :py:meth:`run`. Return value of this method is json encoded and sent to the scheduler as the `expl` argument. Its string representation will be used as the body of the error email sent out if any. Default behavior is to return a string representation of the stack trace. """ traceback_string = traceback.format_exc() return "Runtime error:\n%s" % traceback_string def on_success(self): """ Override for doing custom completion handling for a larger class of tasks This method gets called when :py:meth:`run` completes without raising any exceptions. The returned value is json encoded and sent to the scheduler as the `expl` argument. Default behavior is to send an None value""" pass class MixinNaiveBulkComplete(object): """ Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop. Applicable to tasks whose completeness checking is cheap. This doesn't exploit output location specific APIs for speed advantage, nevertheless removes redundant scheduler roundtrips. """ @classmethod def bulk_complete(cls, parameter_tuples): return [t for t in parameter_tuples if cls(t).complete()] def externalize(task): """ Returns an externalized version of the Task. See :py:class:`ExternalTask`. """ task.run = NotImplemented return task class ExternalTask(Task): """ Subclass for references to external dependencies. An ExternalTask's does not have a `run` implementation, which signifies to the framework that this Task's :py:meth:`output` is generated outside of Luigi. """ run = NotImplemented class WrapperTask(Task): """ Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist. """ def complete(self): return all(r.complete() for r in flatten(self.requires())) class Config(Task): """Used for configuration that's not specific to a certain task TODO: let's refactor Task & Config so that it inherits from a common ParamContainer base class """ pass def getpaths(struct): """ Maps all Tasks in a structured data object to their .output(). """ if isinstance(struct, Task): return struct.output() elif isinstance(struct, dict): r = {} for k, v in six.iteritems(struct): r[k] = getpaths(v) return r else: # Remaining case: assume r is iterable... try: s = list(struct) except TypeError: raise Exception('Cannot map %s to Task/dict/list' % str(struct)) return [getpaths(r) for r in s] def flatten(struct): """ Creates a flat list of all all items in structured output (dicts, lists, items): .. code-block:: python >>> sorted(flatten({'a': 'foo', 'b': 'bar'})) ['bar', 'foo'] >>> sorted(flatten(['foo', ['bar', 'troll']])) ['bar', 'foo', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42] """ if struct is None: return [] flat = [] if isinstance(struct, dict): for _, result in six.iteritems(struct): flat += flatten(result) return flat if isinstance(struct, six.string_types): return [struct] try: # if iterable iterator = iter(struct) except TypeError: return [struct] for result in iterator: flat += flatten(result) return flat def flatten_output(task): """ Lists all output targets by recursively walking output-less (wrapper) tasks. FIXME order consistently. """ r = flatten(task.output()) if not r: for dep in flatten(task.requires()): r += flatten_output(dep) return r
BugsInPy/BugsInPy/temp/projects/luigi/bug-20-fixed/luigi/luigi/task.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-20-buggy/luigi/luigi/task.py
luigi-bug-2
# -*- coding: utf-8 -*- # # Copyright 2019 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc from abc import abstractmethod, abstractproperty, ABCMeta import logging import json import os import subprocess import luigi from luigi import six from luigi.contrib import bigquery, gcs from luigi.task import MixinNaiveBulkComplete logger = logging.getLogger('luigi-interface') @six.add_metaclass(abc.ABCMeta) class DataflowParamKeys(object): """ Defines the naming conventions for Dataflow execution params. For example, the Java API expects param names in lower camel case, whereas the Python implementation expects snake case. """ @abstractproperty def runner(self): pass @abstractproperty def project(self): pass @abstractproperty def zone(self): pass @abstractproperty def region(self): pass @abstractproperty def staging_location(self): pass @abstractproperty def temp_location(self): pass @abstractproperty def gcp_temp_location(self): pass @abstractproperty def num_workers(self): pass @abstractproperty def autoscaling_algorithm(self): pass @abstractproperty def max_num_workers(self): pass @abstractproperty def disk_size_gb(self): pass @abstractproperty def worker_machine_type(self): pass @abstractproperty def worker_disk_type(self): pass @abstractproperty def job_name(self): pass @abstractproperty def service_account(self): pass @abstractproperty def network(self): pass @abstractproperty def subnetwork(self): pass @abstractproperty def labels(self): pass class _CmdLineRunner(object): """ Executes a given command line class in a subprocess, logging its output. If more complex monitoring/logging is desired, user can implement their own launcher class and set it in BeamDataflowJobTask.cmd_line_runner. """ @staticmethod def run(cmd, task=None): process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True ) output_lines = [] while True: line = process.stdout.readline() if not line: break line = line.decode("utf-8") output_lines += [line] logger.info(line.rstrip("\n")) process.stdout.close() exit_code = process.wait() if exit_code: output = "".join(output_lines) raise subprocess.CalledProcessError(exit_code, cmd, output=output) @six.add_metaclass(ABCMeta) class BeamDataflowJobTask(MixinNaiveBulkComplete, luigi.Task): """ Luigi wrapper for a Dataflow job. Must be overridden for each Beam SDK with that SDK's dataflow_executable(). For more documentation, see: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params The following required Dataflow properties must be set: project # GCP project ID temp_location # Cloud storage path for temporary files The following optional Dataflow properties can be set: runner # PipelineRunner implementation for your Beam job. 
Default: DirectRunner num_workers # The number of workers to start the task with Default: Determined by Dataflow service autoscaling_algorithm # The Autoscaling mode for the Dataflow job Default: `THROUGHPUT_BASED` max_num_workers # Used if the autoscaling is enabled Default: Determined by Dataflow service network # Network in GCE to be used for launching workers Default: a network named "default" subnetwork # Subnetwork in GCE to be used for launching workers Default: Determined by Dataflow service disk_size_gb # Remote worker disk size. Minimum value is 30GB Default: set to 0 to use GCP project default worker_machine_type # Machine type to create Dataflow worker VMs Default: Determined by Dataflow service job_name # Custom job name, must be unique across project's active jobs worker_disk_type # Specify SSD for local disk or defaults to hard disk as a full URL of disk type resource Default: Determined by Dataflow service. service_account # Service account of Dataflow VMs/workers Default: active GCE service account region # Region to deploy Dataflow job to Default: us-central1 zone # Availability zone for launching workers instances Default: an available zone in the specified region staging_location # Cloud Storage bucket for Dataflow to stage binary files Default: the value of temp_location gcp_temp_location # Cloud Storage path for Dataflow to stage temporary files Default: the value of temp_location labels # Custom GCP labels attached to the Dataflow job Default: nothing """ project = None runner = None temp_location = None staging_location = None gcp_temp_location = None num_workers = None autoscaling_algorithm = None max_num_workers = None network = None subnetwork = None disk_size_gb = None worker_machine_type = None job_name = None worker_disk_type = None service_account = None zone = None region = None labels = {} cmd_line_runner = _CmdLineRunner dataflow_params = None def __init__(self): if not isinstance(self.dataflow_params, DataflowParamKeys): raise ValueError("dataflow_params must be of type DataflowParamKeys") @abstractmethod def dataflow_executable(self): """ Command representing the Dataflow executable to be run. For example: return ['java', 'com.spotify.luigi.MyClass', '-Xmx256m'] """ pass def args(self): """ Extra String arguments that will be passed to your Dataflow job. For example: return ['--setup_file=setup.py'] """ return [] def before_run(self): """ Hook that gets called right before the Dataflow job is launched. Can be used to setup any temporary files/tables, validate input, etc. """ pass def on_successful_run(self): """ Callback that gets called right after the Dataflow job has finished successfully but before validate_output is run. """ pass def validate_output(self): """ Callback that can be used to validate your output before it is moved to its final location. Returning false here will cause the job to fail, and output to be removed instead of published. """ return True def file_pattern(self): """ If one/some of the input target files are not in the pattern of part-*, we can add the key of the required target and the correct file pattern that should be appended in the command line here. If the input target key is not found in this dict, the file pattern will be assumed to be part-* for that target. :return A dictionary of overridden file pattern that is not part-* for the inputs """ return {} def on_successful_output_validation(self): """ Callback that gets called after the Dataflow job has finished successfully if validate_output returns True. 
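A hedged sketch of wiring the class up; ``SnakeCaseParamKeys`` is an assumed concrete ``DataflowParamKeys`` (plain class attributes satisfy the abstract properties), and the project, bucket, and jar names are placeholders:

.. code-block:: python

    from luigi.contrib.beam_dataflow import BeamDataflowJobTask, DataflowParamKeys

    class SnakeCaseParamKeys(DataflowParamKeys):
        runner = 'runner'
        project = 'project'
        zone = 'zone'
        region = 'region'
        staging_location = 'staging_location'
        temp_location = 'temp_location'
        gcp_temp_location = 'gcp_temp_location'
        num_workers = 'num_workers'
        autoscaling_algorithm = 'autoscaling_algorithm'
        max_num_workers = 'max_num_workers'
        disk_size_gb = 'disk_size_gb'
        worker_machine_type = 'worker_machine_type'
        worker_disk_type = 'worker_disk_type'
        job_name = 'job_name'
        service_account = 'service_account'
        network = 'network'
        subnetwork = 'subnetwork'
        labels = 'labels'

    class WordCount(BeamDataflowJobTask):
        project = 'my-gcp-project'            # hypothetical GCP project
        temp_location = 'gs://my-bucket/tmp'  # hypothetical bucket
        runner = 'DataflowRunner'
        dataflow_params = SnakeCaseParamKeys()

        def dataflow_executable(self):
            return ['java', '-cp', 'job.jar', 'com.example.WordCount']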
""" pass def cleanup_on_error(self, error): """ Callback that gets called after the Dataflow job has finished unsuccessfully, or validate_output returns False. """ pass def run(self): cmd_line = self._mk_cmd_line() logger.info(' '.join(cmd_line)) self.before_run() try: self.cmd_line_runner.run(cmd_line, self) except subprocess.CalledProcessError as e: logger.error(e, exc_info=True) self.cleanup_on_error(e) os._exit(e.returncode) self.on_successful_run() if self.validate_output(): self.on_successful_output_validation() else: error = ValueError("Output validation failed") self.cleanup_on_error(error) raise error def _mk_cmd_line(self): cmd_line = self.dataflow_executable() cmd_line.extend(self._get_dataflow_args()) cmd_line.extend(self.args()) cmd_line.extend(self._format_input_args()) cmd_line.extend(self._format_output_args()) return cmd_line def _get_runner(self): if not self.runner: logger.warning("Runner not supplied to BeamDataflowJobTask. " + "Defaulting to DirectRunner.") return "DirectRunner" elif self.runner in [ "DataflowRunner", "DirectRunner" ]: return self.runner else: raise ValueError("Runner %s is unsupported." % self.runner) def _get_dataflow_args(self): def f(key, value): return '--{}={}'.format(key, value) output = [] output.append(f(self.dataflow_params.runner, self._get_runner())) if self.project: output.append(f(self.dataflow_params.project, self.project)) if self.zone: output.append(f(self.dataflow_params.zone, self.zone)) if self.region: output.append(f(self.dataflow_params.region, self.region)) if self.staging_location: output.append(f(self.dataflow_params.staging_location, self.staging_location)) if self.temp_location: output.append(f(self.dataflow_params.temp_location, self.temp_location)) if self.gcp_temp_location: output.append(f(self.dataflow_params.gcp_temp_location, self.gcp_temp_location)) if self.num_workers: output.append(f(self.dataflow_params.num_workers, self.num_workers)) if self.autoscaling_algorithm: output.append(f(self.dataflow_params.autoscaling_algorithm, self.autoscaling_algorithm)) if self.max_num_workers: output.append(f(self.dataflow_params.max_num_workers, self.max_num_workers)) if self.disk_size_gb: output.append(f(self.dataflow_params.disk_size_gb, self.disk_size_gb)) if self.worker_machine_type: output.append(f(self.dataflow_params.worker_machine_type, self.worker_machine_type)) if self.worker_disk_type: output.append(f(self.dataflow_params.worker_disk_type, self.worker_disk_type)) if self.network: output.append(f(self.dataflow_params.network, self.network)) if self.subnetwork: output.append(f(self.dataflow_params.subnetwork, self.subnetwork)) if self.job_name: output.append(f(self.dataflow_params.job_name, self.job_name)) if self.service_account: output.append(f(self.dataflow_params.service_account, self.service_account)) if self.labels: output.append(f(self.dataflow_params.labels, json.dumps(self.labels))) return output def _format_input_args(self): """ Parses the result(s) of self.input() into a string-serialized key-value list passed to the Dataflow job. Valid inputs include: return FooTarget() return {"input1": FooTarget(), "input2": FooTarget2()) return ("input", FooTarget()) return [("input1", FooTarget()), ("input2": FooTarget2())] return [FooTarget(), FooTarget2()] Unlabeled input are passed in with under the default key "input". 
""" job_input = self.input() if isinstance(job_input, luigi.Target): job_input = {"input": job_input} elif isinstance(job_input, tuple): job_input = {job_input[0]: job_input[1]} elif isinstance(job_input, list): if all(isinstance(item, tuple) for item in job_input): job_input = dict(job_input) else: job_input = {"input": job_input} elif not isinstance(job_input, dict): raise ValueError("Invalid job input requires(). Supported types: [" "Target, tuple of (name, Target), " "dict of (name: Target), list of Targets]") if not isinstance(self.file_pattern(), dict): raise ValueError('file_pattern() must return a dict type') input_args = [] for (name, targets) in job_input.items(): uris = [ self.get_target_path(uri_target) for uri_target in luigi.task.flatten(targets) ] if isinstance(targets, dict): """ If targets is a dict that means it had multiple outputs. Make the input args in that case "<input key>-<task output key>" """ names = ["%s-%s" % (name, key) for key in targets.keys()] else: names = [name] * len(uris) input_dict = {} for (arg_name, uri) in zip(names, uris): pattern = self.file_pattern().get(name, 'part-*') input_value = input_dict.get(arg_name, []) input_value.append(uri.rstrip('/') + '/' + pattern) input_dict[arg_name] = input_value for (key, paths) in input_dict.items(): input_args.append("--%s=%s" % (key, ','.join(paths))) return input_args def _format_output_args(self): """ Parses the result(s) of self.output() into a string-serialized key-value list passed to the Dataflow job. Valid outputs include: return FooTarget() return {"output1": FooTarget(), "output2": FooTarget2()} Unlabeled outputs are passed in with under the default key "output". """ job_output = self.output() if isinstance(job_output, luigi.Target): job_output = {"output": job_output} elif not isinstance(job_output, dict): raise ValueError( "Task output must be a Target or a dict from String to Target") output_args = [] for (name, target) in job_output.items(): uri = self.get_target_path(target) output_args.append("--%s=%s" % (name, uri)) return output_args @staticmethod def get_target_path(target): if isinstance(target, luigi.LocalTarget) or isinstance(target, gcs.GCSTarget): return target.path elif isinstance(target, bigquery.BigQueryTarget): "{}:{}.{}".format(target.project_id, target.dataset_id, target.table_id) else: raise ValueError("Target not supported") # -*- coding: utf-8 -*- # # Copyright 2019 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc from abc import abstractmethod, abstractproperty, ABCMeta import logging import json import os import subprocess import luigi from luigi import six from luigi.contrib import bigquery, gcs from luigi.task import MixinNaiveBulkComplete logger = logging.getLogger('luigi-interface') @six.add_metaclass(abc.ABCMeta) class DataflowParamKeys(object): """ Defines the naming conventions for Dataflow execution params. For example, the Java API expects param names in lower camel case, whereas the Python implementation expects snake case. 
""" @abstractproperty def runner(self): pass @abstractproperty def project(self): pass @abstractproperty def zone(self): pass @abstractproperty def region(self): pass @abstractproperty def staging_location(self): pass @abstractproperty def temp_location(self): pass @abstractproperty def gcp_temp_location(self): pass @abstractproperty def num_workers(self): pass @abstractproperty def autoscaling_algorithm(self): pass @abstractproperty def max_num_workers(self): pass @abstractproperty def disk_size_gb(self): pass @abstractproperty def worker_machine_type(self): pass @abstractproperty def worker_disk_type(self): pass @abstractproperty def job_name(self): pass @abstractproperty def service_account(self): pass @abstractproperty def network(self): pass @abstractproperty def subnetwork(self): pass @abstractproperty def labels(self): pass class _CmdLineRunner(object): """ Executes a given command line class in a subprocess, logging its output. If more complex monitoring/logging is desired, user can implement their own launcher class and set it in BeamDataflowJobTask.cmd_line_runner. """ @staticmethod def run(cmd, task=None): process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True ) output_lines = [] while True: line = process.stdout.readline() if not line: break line = line.decode("utf-8") output_lines += [line] logger.info(line.rstrip("\n")) process.stdout.close() exit_code = process.wait() if exit_code: output = "".join(output_lines) raise subprocess.CalledProcessError(exit_code, cmd, output=output) @six.add_metaclass(ABCMeta) class BeamDataflowJobTask(MixinNaiveBulkComplete, luigi.Task): """ Luigi wrapper for a Dataflow job. Must be overridden for each Beam SDK with that SDK's dataflow_executable(). For more documentation, see: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params The following required Dataflow properties must be set: project # GCP project ID temp_location # Cloud storage path for temporary files The following optional Dataflow properties can be set: runner # PipelineRunner implementation for your Beam job. Default: DirectRunner num_workers # The number of workers to start the task with Default: Determined by Dataflow service autoscaling_algorithm # The Autoscaling mode for the Dataflow job Default: `THROUGHPUT_BASED` max_num_workers # Used if the autoscaling is enabled Default: Determined by Dataflow service network # Network in GCE to be used for launching workers Default: a network named "default" subnetwork # Subnetwork in GCE to be used for launching workers Default: Determined by Dataflow service disk_size_gb # Remote worker disk size. Minimum value is 30GB Default: set to 0 to use GCP project default worker_machine_type # Machine type to create Dataflow worker VMs Default: Determined by Dataflow service job_name # Custom job name, must be unique across project's active jobs worker_disk_type # Specify SSD for local disk or defaults to hard disk as a full URL of disk type resource Default: Determined by Dataflow service. 
service_account # Service account of Dataflow VMs/workers Default: active GCE service account region # Region to deploy Dataflow job to Default: us-central1 zone # Availability zone for launching workers instances Default: an available zone in the specified region staging_location # Cloud Storage bucket for Dataflow to stage binary files Default: the value of temp_location gcp_temp_location # Cloud Storage path for Dataflow to stage temporary files Default: the value of temp_location labels # Custom GCP labels attached to the Dataflow job Default: nothing """ project = None runner = None temp_location = None staging_location = None gcp_temp_location = None num_workers = None autoscaling_algorithm = None max_num_workers = None network = None subnetwork = None disk_size_gb = None worker_machine_type = None job_name = None worker_disk_type = None service_account = None zone = None region = None labels = {} cmd_line_runner = _CmdLineRunner dataflow_params = None def __init__(self): if not isinstance(self.dataflow_params, DataflowParamKeys): raise ValueError("dataflow_params must be of type DataflowParamKeys") @abstractmethod def dataflow_executable(self): """ Command representing the Dataflow executable to be run. For example: return ['java', 'com.spotify.luigi.MyClass', '-Xmx256m'] """ pass def args(self): """ Extra String arguments that will be passed to your Dataflow job. For example: return ['--setup_file=setup.py'] """ return [] def before_run(self): """ Hook that gets called right before the Dataflow job is launched. Can be used to setup any temporary files/tables, validate input, etc. """ pass def on_successful_run(self): """ Callback that gets called right after the Dataflow job has finished successfully but before validate_output is run. """ pass def validate_output(self): """ Callback that can be used to validate your output before it is moved to its final location. Returning false here will cause the job to fail, and output to be removed instead of published. """ return True def file_pattern(self): """ If one/some of the input target files are not in the pattern of part-*, we can add the key of the required target and the correct file pattern that should be appended in the command line here. If the input target key is not found in this dict, the file pattern will be assumed to be part-* for that target. :return A dictionary of overridden file pattern that is not part-* for the inputs """ return {} def on_successful_output_validation(self): """ Callback that gets called after the Dataflow job has finished successfully if validate_output returns True. """ pass def cleanup_on_error(self, error): """ Callback that gets called after the Dataflow job has finished unsuccessfully, or validate_output returns False. 
""" pass def run(self): cmd_line = self._mk_cmd_line() logger.info(' '.join(cmd_line)) self.before_run() try: self.cmd_line_runner.run(cmd_line, self) except subprocess.CalledProcessError as e: logger.error(e, exc_info=True) self.cleanup_on_error(e) os._exit(e.returncode) self.on_successful_run() if self.validate_output(): self.on_successful_output_validation() else: error = ValueError("Output validation failed") self.cleanup_on_error(error) raise error def _mk_cmd_line(self): cmd_line = self.dataflow_executable() cmd_line.extend(self._get_dataflow_args()) cmd_line.extend(self.args()) cmd_line.extend(self._format_input_args()) cmd_line.extend(self._format_output_args()) return cmd_line def _get_runner(self): if not self.runner: logger.warning("Runner not supplied to BeamDataflowJobTask. " + "Defaulting to DirectRunner.") return "DirectRunner" elif self.runner in [ "DataflowRunner", "DirectRunner" ]: return self.runner else: raise ValueError("Runner %s is unsupported." % self.runner) def _get_dataflow_args(self): def f(key, value): return '--{}={}'.format(key, value) output = [] output.append(f(self.dataflow_params.runner, self._get_runner())) if self.project: output.append(f(self.dataflow_params.project, self.project)) if self.zone: output.append(f(self.dataflow_params.zone, self.zone)) if self.region: output.append(f(self.dataflow_params.region, self.region)) if self.staging_location: output.append(f(self.dataflow_params.staging_location, self.staging_location)) if self.temp_location: output.append(f(self.dataflow_params.temp_location, self.temp_location)) if self.gcp_temp_location: output.append(f(self.dataflow_params.gcp_temp_location, self.gcp_temp_location)) if self.num_workers: output.append(f(self.dataflow_params.num_workers, self.num_workers)) if self.autoscaling_algorithm: output.append(f(self.dataflow_params.autoscaling_algorithm, self.autoscaling_algorithm)) if self.max_num_workers: output.append(f(self.dataflow_params.max_num_workers, self.max_num_workers)) if self.disk_size_gb: output.append(f(self.dataflow_params.disk_size_gb, self.disk_size_gb)) if self.worker_machine_type: output.append(f(self.dataflow_params.worker_machine_type, self.worker_machine_type)) if self.worker_disk_type: output.append(f(self.dataflow_params.worker_disk_type, self.worker_disk_type)) if self.network: output.append(f(self.dataflow_params.network, self.network)) if self.subnetwork: output.append(f(self.dataflow_params.subnetwork, self.subnetwork)) if self.job_name: output.append(f(self.dataflow_params.job_name, self.job_name)) if self.service_account: output.append(f(self.dataflow_params.service_account, self.service_account)) if self.labels: output.append(f(self.dataflow_params.labels, json.dumps(self.labels))) return output def _format_input_args(self): """ Parses the result(s) of self.input() into a string-serialized key-value list passed to the Dataflow job. Valid inputs include: return FooTarget() return {"input1": FooTarget(), "input2": FooTarget2()) return ("input", FooTarget()) return [("input1", FooTarget()), ("input2": FooTarget2())] return [FooTarget(), FooTarget2()] Unlabeled input are passed in with under the default key "input". 
""" job_input = self.input() if isinstance(job_input, luigi.Target): job_input = {"input": job_input} elif isinstance(job_input, tuple): job_input = {job_input[0]: job_input[1]} elif isinstance(job_input, list): if all(isinstance(item, tuple) for item in job_input): job_input = dict(job_input) else: job_input = {"input": job_input} elif not isinstance(job_input, dict): raise ValueError("Invalid job input requires(). Supported types: [" "Target, tuple of (name, Target), " "dict of (name: Target), list of Targets]") if not isinstance(self.file_pattern(), dict): raise ValueError('file_pattern() must return a dict type') input_args = [] for (name, targets) in job_input.items(): uris = [ self.get_target_path(uri_target) for uri_target in luigi.task.flatten(targets) ] if isinstance(targets, dict): """ If targets is a dict that means it had multiple outputs. Make the input args in that case "<input key>-<task output key>" """ names = ["%s-%s" % (name, key) for key in targets.keys()] else: names = [name] * len(uris) input_dict = {} for (arg_name, uri) in zip(names, uris): pattern = self.file_pattern().get(name, 'part-*') input_value = input_dict.get(arg_name, []) input_value.append(uri.rstrip('/') + '/' + pattern) input_dict[arg_name] = input_value for (key, paths) in input_dict.items(): input_args.append("--%s=%s" % (key, ','.join(paths))) return input_args def _format_output_args(self): """ Parses the result(s) of self.output() into a string-serialized key-value list passed to the Dataflow job. Valid outputs include: return FooTarget() return {"output1": FooTarget(), "output2": FooTarget2()} Unlabeled outputs are passed in with under the default key "output". """ job_output = self.output() if isinstance(job_output, luigi.Target): job_output = {"output": job_output} elif not isinstance(job_output, dict): raise ValueError( "Task output must be a Target or a dict from String to Target") output_args = [] for (name, target) in job_output.items(): uri = self.get_target_path(target) output_args.append("--%s=%s" % (name, uri)) return output_args @staticmethod def get_target_path(target): if isinstance(target, luigi.LocalTarget) or isinstance(target, gcs.GCSTarget): return target.path elif isinstance(target, bigquery.BigQueryTarget): "{}:{}.{}".format(target.project_id, target.dataset_id, target.table_id) else: raise ValueError("Target not supported")
BugsInPy/BugsInPy/temp/projects/luigi/bug-2-fixed/luigi/luigi/contrib/beam_dataflow.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-2-buggy/luigi/luigi/contrib/beam_dataflow.py
luigi-bug-25
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import json import logging import time import luigi from luigi import postgres from luigi.contrib import rdbms from luigi.s3 import S3PathTask, S3Target logger = logging.getLogger('luigi-interface') try: import psycopg2 import psycopg2.errorcodes except ImportError: logger.warning("Loading postgres module without psycopg2 installed. " "Will crash at runtime if postgres functionality is used.") class RedshiftTarget(postgres.PostgresTarget): """ Target for a resource in Redshift. Redshift is similar to postgres with a few adjustments required by redshift. """ marker_table = luigi.configuration.get_config().get( 'redshift', 'marker-table', 'table_updates') use_db_timestamps = False class S3CopyToTable(rdbms.CopyToTable): """ Template task for inserting a data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`. """ @abc.abstractproperty def s3_load_path(self): """ Override to return the load path. """ return None @abc.abstractproperty def aws_access_key_id(self): """ Override to return the key id. """ return None @abc.abstractproperty def aws_secret_access_key(self): """ Override to return the secret access key. """ return None @abc.abstractproperty def copy_options(self): """ Add extra copy options, for example: * TIMEFORMAT 'auto' * IGNOREHEADER 1 * TRUNCATECOLUMNS * IGNOREBLANKLINES """ return '' def table_attributes(self): '''Add extra table attributes, for example: DISTSTYLE KEY DISTKEY (MY_FIELD) SORTKEY (MY_FIELD_2, MY_FIELD_3) ''' return '' def do_truncate_table(self): """ Return True if table should be truncated before copying new data in. """ return False def truncate_table(self, connection): query = "truncate %s" % self.table cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. """ if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented " "for %r and columns types not " "specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format( name=name, type=type) for name, type in self.columns ) query = ("CREATE TABLE " "{table} ({coldefs}) " "{table_attributes}").format( table=self.table, coldefs=coldefs, table_attributes=self.table_attributes()) connection.cursor().execute(query) def run(self): """ If the target table doesn't exist, self.create_table will be called to attempt to create the table. 
""" if not (self.table): raise Exception("table need to be specified") path = self.s3_load_path() connection = self.output().connect() if not self.does_table_exist(connection): # try creating table logger.info("Creating table %s", self.table) connection.reset() self.create_table(connection) elif self.do_truncate_table(): logger.info("Truncating table %s", self.table) self.truncate_table(connection) logger.info("Inserting file: %s", path) cursor = connection.cursor() self.init_copy(connection) self.copy(cursor, path) self.output().touch(connection) connection.commit() # commit and clean up connection.close() def copy(self, cursor, f): """ Defines copying from s3 into redshift. """ cursor.execute(""" COPY %s from '%s' CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s' delimiter '%s' %s ;""" % (self.table, f, self.aws_access_key_id, self.aws_secret_access_key, self.column_separator, self.copy_options)) def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id()) def does_table_exist(self, connection): """ Determine whether the table already exists. """ query = ("select 1 as table_exists " "from pg_table_def " "where tablename = %s limit 1") cursor = connection.cursor() try: cursor.execute(query, (self.table,)) result = cursor.fetchone() return bool(result) finally: cursor.close() class S3CopyJSONToTable(S3CopyToTable): """ Template task for inserting a JSON data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`, * `jsonpath`, * `copy_json_options`. """ @abc.abstractproperty def jsonpath(self): """ Override the jsonpath schema location for the table. """ return '' @abc.abstractproperty def copy_json_options(self): """ Add extra copy options, for example: * GZIP * LZOP """ return '' def copy(self, cursor, f): """ Defines copying JSON from s3 into redshift. """ cursor.execute(""" COPY %s from '%s' CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s' JSON AS '%s' %s %s ;""" % (self.table, f, self.aws_access_key_id, self.aws_secret_access_key, self.jsonpath, self.copy_json_options, self.copy_options)) class RedshiftManifestTask(S3PathTask): """ Generic task to generate a manifest file that can be used in S3CopyToTable in order to copy multiple files from your s3 folder into a redshift table at once. 
For full description on how to use the manifest file see http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html Usage: * requires parameters * path - s3 path to the generated manifest file, including the name of the generated file to be copied into a redshift table * folder_paths - s3 paths to the folders containing files you wish to be copied Output: * generated manifest file """ # should be over ridden to point to a variety # of folders you wish to copy from folder_paths = luigi.Parameter() text_target = True def run(self): entries = [] for folder_path in self.folder_paths: s3 = S3Target(folder_path) client = s3.fs for file_name in client.list(s3.path): entries.append({ 'url': '%s/%s' % (folder_path, file_name), 'mandatory': True }) manifest = {'entries': entries} target = self.output().open('w') dump = json.dumps(manifest) if not self.text_target: dump = dump.encode('utf8') target.write(dump) target.close() class KillOpenRedshiftSessions(luigi.Task): """ An task for killing any open Redshift sessions in a given database. This is necessary to prevent open user sessions with transactions against the table from blocking drop or truncate table commands. Usage: Subclass and override the required `host`, `database`, `user`, and `password` attributes. """ # time in seconds to wait before # reconnecting to Redshift if our session is killed too. # 30 seconds is usually fine; 60 is conservative connection_reset_wait_seconds = luigi.IntParameter(default=60) @abc.abstractproperty def host(self): return None @abc.abstractproperty def database(self): return None @abc.abstractproperty def user(self): return None @abc.abstractproperty def password(self): return None def update_id(self): """ This update id will be a unique identifier for this insert on this table. """ return self.task_id def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ # uses class name as a meta-table return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.__class__.__name__, update_id=self.update_id()) def run(self): """ Kill any open Redshift sessions for the given database. """ connection = self.output().connect() # kill any sessions other than ours and # internal Redshift sessions (rdsdb) query = ("select pg_terminate_backend(process) " "from STV_SESSIONS " "where db_name=%s " "and user_name != 'rdsdb' " "and process != pg_backend_pid()") cursor = connection.cursor() logger.info('Killing all open Redshift sessions for database: %s', self.database) try: cursor.execute(query, (self.database,)) cursor.close() connection.commit() except psycopg2.DatabaseError as e: if e.message and 'EOF' in e.message: # sometimes this operation kills the current session. # rebuild the connection. Need to pause for 30-60 seconds # before Redshift will allow us back in. connection.close() logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds) time.sleep(self.connection_reset_wait_seconds) logger.info('Reconnecting to Redshift') connection = self.output().connect() else: raise try: self.output().touch(connection) connection.commit() finally: connection.close() logger.info('Done killing all open Redshift sessions for database: %s', self.database) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import json import logging import time import luigi from luigi import postgres from luigi.contrib import rdbms from luigi.s3 import S3PathTask, S3Target logger = logging.getLogger('luigi-interface') try: import psycopg2 import psycopg2.errorcodes except ImportError: logger.warning("Loading postgres module without psycopg2 installed. " "Will crash at runtime if postgres functionality is used.") class RedshiftTarget(postgres.PostgresTarget): """ Target for a resource in Redshift. Redshift is similar to postgres with a few adjustments required by redshift. """ marker_table = luigi.configuration.get_config().get( 'redshift', 'marker-table', 'table_updates') use_db_timestamps = False class S3CopyToTable(rdbms.CopyToTable): """ Template task for inserting a data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`. """ @abc.abstractproperty def s3_load_path(self): """ Override to return the load path. """ return None @abc.abstractproperty def aws_access_key_id(self): """ Override to return the key id. """ return None @abc.abstractproperty def aws_secret_access_key(self): """ Override to return the secret access key. """ return None @abc.abstractproperty def copy_options(self): """ Add extra copy options, for example: * TIMEFORMAT 'auto' * IGNOREHEADER 1 * TRUNCATECOLUMNS * IGNOREBLANKLINES """ return '' def table_attributes(self): '''Add extra table attributes, for example: DISTSTYLE KEY DISTKEY (MY_FIELD) SORTKEY (MY_FIELD_2, MY_FIELD_3) ''' return '' def do_truncate_table(self): """ Return True if table should be truncated before copying new data in. """ return False def truncate_table(self, connection): query = "truncate %s" % self.table cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. """ if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented " "for %r and columns types not " "specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format( name=name, type=type) for name, type in self.columns ) query = ("CREATE TABLE " "{table} ({coldefs}) " "{table_attributes}").format( table=self.table, coldefs=coldefs, table_attributes=self.table_attributes()) connection.cursor().execute(query) def run(self): """ If the target table doesn't exist, self.create_table will be called to attempt to create the table. 
""" if not (self.table): raise Exception("table need to be specified") path = self.s3_load_path() connection = self.output().connect() if not self.does_table_exist(connection): # try creating table logger.info("Creating table %s", self.table) connection.reset() self.create_table(connection) elif self.do_truncate_table(): logger.info("Truncating table %s", self.table) self.truncate_table(connection) logger.info("Inserting file: %s", path) cursor = connection.cursor() self.init_copy(connection) self.copy(cursor, path) self.output().touch(connection) connection.commit() # commit and clean up connection.close() def copy(self, cursor, f): """ Defines copying from s3 into redshift. """ cursor.execute(""" COPY %s from '%s' CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s' delimiter '%s' %s ;""" % (self.table, f, self.aws_access_key_id, self.aws_secret_access_key, self.column_separator, self.copy_options)) def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id()) def does_table_exist(self, connection): """ Determine whether the table already exists. """ query = ("select 1 as table_exists " "from pg_table_def " "where tablename = %s limit 1") cursor = connection.cursor() try: cursor.execute(query, (self.table,)) result = cursor.fetchone() return bool(result) finally: cursor.close() class S3CopyJSONToTable(S3CopyToTable): """ Template task for inserting a JSON data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`, * `jsonpath`, * `copy_json_options`. """ @abc.abstractproperty def jsonpath(self): """ Override the jsonpath schema location for the table. """ return '' @abc.abstractproperty def copy_json_options(self): """ Add extra copy options, for example: * GZIP * LZOP """ return '' def copy(self, cursor, f): """ Defines copying JSON from s3 into redshift. """ cursor.execute(""" COPY %s from '%s' CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s' JSON AS '%s' %s %s ;""" % (self.table, f, self.aws_access_key_id, self.aws_secret_access_key, self.jsonpath, self.copy_json_options, self.copy_options)) class RedshiftManifestTask(S3PathTask): """ Generic task to generate a manifest file that can be used in S3CopyToTable in order to copy multiple files from your s3 folder into a redshift table at once. 
    For a full description of how to use the manifest file, see
    http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html

    Usage:

    * requires parameters

        * path - s3 path to the generated manifest file, including the
                 name of the generated file
                 to be copied into a redshift table
        * folder_paths - s3 paths to the folders containing files you wish to be copied

    Output:

    * generated manifest file
    """

    # should be overridden to point to a variety
    # of folders you wish to copy from
    folder_paths = luigi.Parameter()
    text_target = True

    def run(self):
        entries = []
        for folder_path in self.folder_paths:
            s3 = S3Target(folder_path)
            client = s3.fs
            for file_name in client.list(s3.path):
                entries.append({
                    'url': '%s/%s' % (folder_path, file_name),
                    'mandatory': True
                })
        manifest = {'entries': entries}
        target = self.output().open('w')
        dump = json.dumps(manifest)
        if not self.text_target:
            dump = dump.encode('utf8')
        target.write(dump)
        target.close()


class KillOpenRedshiftSessions(luigi.Task):
    """
    A task for killing any open Redshift sessions
    in a given database. This is necessary to prevent open user sessions
    with transactions against the table from blocking drop or truncate
    table commands.

    Usage:

    Subclass and override the required `host`, `database`,
    `user`, and `password` attributes.
    """

    # time in seconds to wait before
    # reconnecting to Redshift if our session is killed too.
    # 30 seconds is usually fine; 60 is conservative
    connection_reset_wait_seconds = luigi.IntParameter(default=60)

    @abc.abstractproperty
    def host(self):
        return None

    @abc.abstractproperty
    def database(self):
        return None

    @abc.abstractproperty
    def user(self):
        return None

    @abc.abstractproperty
    def password(self):
        return None

    def update_id(self):
        """
        This update id will be a unique identifier
        for this insert on this table.
        """
        return self.task_id

    def output(self):
        """
        Returns a RedshiftTarget representing the inserted dataset.

        Normally you don't override this.
        """
        # uses class name as a meta-table
        return RedshiftTarget(
            host=self.host,
            database=self.database,
            user=self.user,
            password=self.password,
            table=self.__class__.__name__,
            update_id=self.update_id())

    def run(self):
        """
        Kill any open Redshift sessions for the given database.
        """
        connection = self.output().connect()
        # kill any sessions other than ours and
        # internal Redshift sessions (rdsdb)
        query = ("select pg_terminate_backend(process) "
                 "from STV_SESSIONS "
                 "where db_name=%s "
                 "and user_name != 'rdsdb' "
                 "and process != pg_backend_pid()")
        cursor = connection.cursor()
        logger.info('Killing all open Redshift sessions for database: %s',
                    self.database)
        try:
            cursor.execute(query, (self.database,))
            cursor.close()
            connection.commit()
        except psycopg2.DatabaseError as e:
            if e.message and 'EOF' in e.message:
                # sometimes this operation kills the current session.
                # rebuild the connection. Need to pause for 30-60 seconds
                # before Redshift will allow us back in.
                connection.close()
                logger.info('Pausing %s seconds for Redshift to reset connection',
                            self.connection_reset_wait_seconds)
                time.sleep(self.connection_reset_wait_seconds)
                logger.info('Reconnecting to Redshift')
                connection = self.output().connect()
            else:
                raise

        try:
            self.output().touch(connection)
            connection.commit()
        finally:
            connection.close()

        logger.info('Done killing all open Redshift sessions for database: %s',
                    self.database)
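# --- Editor's note (not part of the original file): a minimal usage sketch for
# the S3CopyToTable template above. Every value below -- the cluster endpoint,
# credentials, bucket, table, and column list -- is hypothetical and only shows
# where the overrides go; real credentials belong in configuration, not code.
class ExampleEventsCopy(S3CopyToTable):
    host = 'examplecluster.abc123.us-east-1.redshift.amazonaws.com:5439'
    database = 'analytics'
    user = 'loader'
    password = 'hunter2'  # placeholder; never hard-code a real password
    table = 'events'
    columns = [('event_id', 'varchar(64)'),
               ('occurred_at', 'timestamp')]
    aws_access_key_id = 'AKIAEXAMPLE'        # placeholder credentials
    aws_secret_access_key = 'example-secret'
    copy_options = "TIMEFORMAT 'auto' IGNOREHEADER 1"

    # run() invokes this as a method (self.s3_load_path()), so override it as one
    def s3_load_path(self):
        return 's3://example-bucket/events/2015-01-01.csv'

# Hypothetical invocation: luigi.run(main_task_cls=ExampleEventsCopy)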
BugsInPy/BugsInPy/temp/projects/luigi/bug-25-fixed/luigi/luigi/contrib/redshift.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-25-buggy/luigi/luigi/contrib/redshift.py
luigi-bug-4
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import json import logging import time import os import luigi from luigi.contrib import postgres from luigi.contrib import rdbms from luigi.contrib.s3 import S3PathTask, S3Target logger = logging.getLogger('luigi-interface') try: import psycopg2 import psycopg2.errorcodes except ImportError: logger.warning("Loading postgres module without psycopg2 installed. " "Will crash at runtime if postgres functionality is used.") class _CredentialsMixin(): """ This mixin is used to provide the same credential properties for AWS to all Redshift tasks. It also provides a helper method to generate the credentials string for the task. """ @property def configuration_section(self): """ Override to change the configuration section used to obtain default credentials. """ return 'redshift' @property def aws_access_key_id(self): """ Override to return the key id. """ return self._get_configuration_attribute('aws_access_key_id') @property def aws_secret_access_key(self): """ Override to return the secret access key. """ return self._get_configuration_attribute('aws_secret_access_key') @property def aws_account_id(self): """ Override to return the account id. """ return self._get_configuration_attribute('aws_account_id') @property def aws_arn_role_name(self): """ Override to return the arn role name. """ return self._get_configuration_attribute('aws_arn_role_name') @property def aws_session_token(self): """ Override to return the session token. """ return self._get_configuration_attribute('aws_session_token') def _get_configuration_attribute(self, attribute): config = luigi.configuration.get_config() value = config.get(self.configuration_section, attribute, default=None) if not value: value = os.environ.get(attribute.upper(), None) return value def _credentials(self): """ Return a credential string for the provided task. If no valid credentials are set, raise a NotImplementedError. """ if self.aws_account_id and self.aws_arn_role_name: return 'aws_iam_role=arn:aws:iam::{id}:role/{role}'.format( id=self.aws_account_id, role=self.aws_arn_role_name ) elif self.aws_access_key_id and self.aws_secret_access_key: return 'aws_access_key_id={key};aws_secret_access_key={secret}{opt}'.format( key=self.aws_access_key_id, secret=self.aws_secret_access_key, opt=';token={}'.format(self.aws_session_token) if self.aws_session_token else '' ) else: raise NotImplementedError("Missing Credentials. " "Ensure one of the pairs of auth args below are set " "in a configuration file, environment variables or by " "being overridden in the task: " "'aws_access_key_id' AND 'aws_secret_access_key' OR " "'aws_account_id' AND 'aws_arn_role_name'") class RedshiftTarget(postgres.PostgresTarget): """ Target for a resource in Redshift. Redshift is similar to postgres with a few adjustments required by redshift. 
""" marker_table = luigi.configuration.get_config().get( 'redshift', 'marker-table', 'table_updates') use_db_timestamps = False class S3CopyToTable(rdbms.CopyToTable, _CredentialsMixin): """ Template task for inserting a data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `s3_load_path`. * You can also override the attributes provided by the CredentialsMixin if they are not supplied by your configuration or environment variables. """ @abc.abstractmethod def s3_load_path(self): """ Override to return the load path. """ return None @abc.abstractproperty def copy_options(self): """ Add extra copy options, for example: * TIMEFORMAT 'auto' * IGNOREHEADER 1 * TRUNCATECOLUMNS * IGNOREBLANKLINES * DELIMITER '\t' """ return '' @property def prune_table(self): """ Override to set equal to the name of the table which is to be pruned. Intended to be used in conjunction with prune_column and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_column(self): """ Override to set equal to the column of the prune_table which is to be compared Intended to be used in conjunction with prune_table and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_date(self): """ Override to set equal to the date by which prune_column is to be compared Intended to be used in conjunction with prune_table and prune_column i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def table_attributes(self): """ Add extra table attributes, for example: DISTSTYLE KEY DISTKEY (MY_FIELD) SORTKEY (MY_FIELD_2, MY_FIELD_3) """ return '' @property def do_truncate_table(self): """ Return True if table should be truncated before copying new data in. """ return False def do_prune(self): """ Return True if prune_table, prune_column, and prune_date are implemented. If only a subset of prune variables are override, an exception is raised to remind the user to implement all or none. Prune (data newer than prune_date deleted) before copying new data in. """ if self.prune_table and self.prune_column and self.prune_date: return True elif self.prune_table or self.prune_column or self.prune_date: raise Exception('override zero or all prune variables') else: return False @property def table_type(self): """ Return table type (i.e. 'temp'). """ return '' @property def queries(self): """ Override to return a list of queries to be executed in order. """ return [] def truncate_table(self, connection): query = "truncate %s" % self.table cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def prune(self, connection): query = "delete from %s where %s >= %s" % (self.prune_table, self.prune_column, self.prune_date) cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. 
""" if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented " "for %r and columns types not " "specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format( name=name, type=type) for name, type in self.columns ) query = ("CREATE {type} TABLE " "{table} ({coldefs}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_attributes=self.table_attributes) connection.cursor().execute(query) elif len(self.columns[0]) == 3: # if columns is specified as (name, type, encoding) tuples # possible column encodings: https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html coldefs = ','.join( '{name} {type} ENCODE {encoding}'.format( name=name, type=type, encoding=encoding) for name, type, encoding in self.columns ) query = ("CREATE {type} TABLE " "{table} ({coldefs}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_attributes=self.table_attributes) connection.cursor().execute(query) else: raise ValueError("create_table() found no columns for %r" % self.table) def run(self): """ If the target table doesn't exist, self.create_table will be called to attempt to create the table. """ if not (self.table): raise Exception("table need to be specified") path = self.s3_load_path() output = self.output() connection = output.connect() cursor = connection.cursor() self.init_copy(connection) self.copy(cursor, path) self.post_copy(cursor) # update marker table output.touch(connection) connection.commit() # commit and clean up connection.close() def copy(self, cursor, f): """ Defines copying from s3 into redshift. If both key-based and role-based credentials are provided, role-based will be used. """ logger.info("Inserting file: %s", f) colnames = '' if len(self.columns) > 0: colnames = ",".join([x[0] for x in self.columns]) colnames = '({})'.format(colnames) cursor.execute(""" COPY {table} {colnames} from '{source}' CREDENTIALS '{creds}' {options} ;""".format( table=self.table, colnames=colnames, source=f, creds=self._credentials(), options=self.copy_options) ) def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id) def does_table_exist(self, connection): """ Determine whether the table already exists. """ if '.' in self.table: query = ("select 1 as table_exists " "from information_schema.tables " "where table_schema = lower(%s) and table_name = lower(%s) limit 1") else: query = ("select 1 as table_exists " "from pg_table_def " "where tablename = lower(%s) limit 1") cursor = connection.cursor() try: cursor.execute(query, tuple(self.table.split('.'))) result = cursor.fetchone() return bool(result) finally: cursor.close() def init_copy(self, connection): """ Perform pre-copy sql - such as creating table, truncating, or removing data older than x. 
""" if not self.does_table_exist(connection): logger.info("Creating table %s", self.table) connection.reset() self.create_table(connection) if self.do_truncate_table: logger.info("Truncating table %s", self.table) self.truncate_table(connection) if self.do_prune(): logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table) self.prune(connection) def post_copy(self, cursor): """ Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc. """ logger.info('Executing post copy queries') for query in self.queries: cursor.execute(query) class S3CopyJSONToTable(S3CopyToTable, _CredentialsMixin): """ Template task for inserting a JSON data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `s3_load_path`, * `jsonpath`, * `copy_json_options`. * You can also override the attributes provided by the CredentialsMixin if they are not supplied by your configuration or environment variables. """ @abc.abstractproperty def jsonpath(self): """ Override the jsonpath schema location for the table. """ return '' @abc.abstractproperty def copy_json_options(self): """ Add extra copy options, for example: * GZIP * LZOP """ return '' def copy(self, cursor, f): """ Defines copying JSON from s3 into redshift. """ logger.info("Inserting file: %s", f) cursor.execute(""" COPY %s from '%s' CREDENTIALS '%s' JSON AS '%s' %s %s ;""" % (self.table, f, self._credentials(), self.jsonpath, self.copy_json_options, self.copy_options)) class RedshiftManifestTask(S3PathTask): """ Generic task to generate a manifest file that can be used in S3CopyToTable in order to copy multiple files from your s3 folder into a redshift table at once. For full description on how to use the manifest file see http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html Usage: * requires parameters * path - s3 path to the generated manifest file, including the name of the generated file to be copied into a redshift table * folder_paths - s3 paths to the folders containing files you wish to be copied Output: * generated manifest file """ # should be over ridden to point to a variety # of folders you wish to copy from folder_paths = luigi.Parameter() text_target = True def run(self): entries = [] for folder_path in self.folder_paths: s3 = S3Target(folder_path) client = s3.fs for file_name in client.list(s3.path): entries.append({ 'url': '%s/%s' % (folder_path, file_name), 'mandatory': True }) manifest = {'entries': entries} target = self.output().open('w') dump = json.dumps(manifest) if not self.text_target: dump = dump.encode('utf8') target.write(dump) target.close() class KillOpenRedshiftSessions(luigi.Task): """ An task for killing any open Redshift sessions in a given database. This is necessary to prevent open user sessions with transactions against the table from blocking drop or truncate table commands. Usage: Subclass and override the required `host`, `database`, `user`, and `password` attributes. """ # time in seconds to wait before # reconnecting to Redshift if our session is killed too. 
# 30 seconds is usually fine; 60 is conservative connection_reset_wait_seconds = luigi.IntParameter(default=60) @abc.abstractproperty def host(self): return None @abc.abstractproperty def database(self): return None @abc.abstractproperty def user(self): return None @abc.abstractproperty def password(self): return None @property def update_id(self): """ This update id will be a unique identifier for this insert on this table. """ return self.task_id def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ # uses class name as a meta-table return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.__class__.__name__, update_id=self.update_id) def run(self): """ Kill any open Redshift sessions for the given database. """ connection = self.output().connect() # kill any sessions other than ours and # internal Redshift sessions (rdsdb) query = ("select pg_terminate_backend(process) " "from STV_SESSIONS " "where db_name=%s " "and user_name != 'rdsdb' " "and process != pg_backend_pid()") cursor = connection.cursor() logger.info('Killing all open Redshift sessions for database: %s', self.database) try: cursor.execute(query, (self.database,)) cursor.close() connection.commit() except psycopg2.DatabaseError as e: if e.message and 'EOF' in e.message: # sometimes this operation kills the current session. # rebuild the connection. Need to pause for 30-60 seconds # before Redshift will allow us back in. connection.close() logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds) time.sleep(self.connection_reset_wait_seconds) logger.info('Reconnecting to Redshift') connection = self.output().connect() else: raise try: self.output().touch(connection) connection.commit() finally: connection.close() logger.info('Done killing all open Redshift sessions for database: %s', self.database) class RedshiftQuery(postgres.PostgresQuery): """ Template task for querying an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. """ def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id ) class RedshiftUnloadTask(postgres.PostgresQuery, _CredentialsMixin): """ Template task for running UNLOAD on an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Optionally, override the `autocommit` atribute to run the query in autocommit mode - this is necessary to run VACUUM for example. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. 
You can also override the attributes provided by the CredentialsMixin if they are not supplied by your configuration or environment variables. """ @property def s3_unload_path(self): """ Override to return the load path. """ return '' @property def unload_options(self): """ Add extra or override default unload options: """ return "DELIMITER '|' ADDQUOTES GZIP ALLOWOVERWRITE PARALLEL ON" @property def unload_query(self): """ Default UNLOAD command """ return ("UNLOAD ( '{query}' ) TO '{s3_unload_path}' " "credentials '{credentials}' " "{unload_options};") def run(self): connection = self.output().connect() cursor = connection.cursor() unload_query = self.unload_query.format( query=self.query().replace("'", r"\'"), s3_unload_path=self.s3_unload_path, unload_options=self.unload_options, credentials=self._credentials()) logger.info('Executing unload query from task: {name}'.format(name=self.__class__)) cursor = connection.cursor() cursor.execute(unload_query) logger.info(cursor.statusmessage) # Update marker table self.output().touch(connection) # commit and close connection connection.commit() connection.close() def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id ) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import json import logging import time import os import luigi from luigi.contrib import postgres from luigi.contrib import rdbms from luigi.contrib.s3 import S3PathTask, S3Target logger = logging.getLogger('luigi-interface') try: import psycopg2 import psycopg2.errorcodes except ImportError: logger.warning("Loading postgres module without psycopg2 installed. " "Will crash at runtime if postgres functionality is used.") class _CredentialsMixin(): """ This mixin is used to provide the same credential properties for AWS to all Redshift tasks. It also provides a helper method to generate the credentials string for the task. """ @property def configuration_section(self): """ Override to change the configuration section used to obtain default credentials. """ return 'redshift' @property def aws_access_key_id(self): """ Override to return the key id. """ return self._get_configuration_attribute('aws_access_key_id') @property def aws_secret_access_key(self): """ Override to return the secret access key. """ return self._get_configuration_attribute('aws_secret_access_key') @property def aws_account_id(self): """ Override to return the account id. """ return self._get_configuration_attribute('aws_account_id') @property def aws_arn_role_name(self): """ Override to return the arn role name. """ return self._get_configuration_attribute('aws_arn_role_name') @property def aws_session_token(self): """ Override to return the session token. 
""" return self._get_configuration_attribute('aws_session_token') def _get_configuration_attribute(self, attribute): config = luigi.configuration.get_config() value = config.get(self.configuration_section, attribute, default=None) if not value: value = os.environ.get(attribute.upper(), None) return value def _credentials(self): """ Return a credential string for the provided task. If no valid credentials are set, raise a NotImplementedError. """ if self.aws_account_id and self.aws_arn_role_name: return 'aws_iam_role=arn:aws:iam::{id}:role/{role}'.format( id=self.aws_account_id, role=self.aws_arn_role_name ) elif self.aws_access_key_id and self.aws_secret_access_key: return 'aws_access_key_id={key};aws_secret_access_key={secret}{opt}'.format( key=self.aws_access_key_id, secret=self.aws_secret_access_key, opt=';token={}'.format(self.aws_session_token) if self.aws_session_token else '' ) else: raise NotImplementedError("Missing Credentials. " "Ensure one of the pairs of auth args below are set " "in a configuration file, environment variables or by " "being overridden in the task: " "'aws_access_key_id' AND 'aws_secret_access_key' OR " "'aws_account_id' AND 'aws_arn_role_name'") class RedshiftTarget(postgres.PostgresTarget): """ Target for a resource in Redshift. Redshift is similar to postgres with a few adjustments required by redshift. """ marker_table = luigi.configuration.get_config().get( 'redshift', 'marker-table', 'table_updates') use_db_timestamps = False class S3CopyToTable(rdbms.CopyToTable, _CredentialsMixin): """ Template task for inserting a data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `s3_load_path`. * You can also override the attributes provided by the CredentialsMixin if they are not supplied by your configuration or environment variables. """ @abc.abstractmethod def s3_load_path(self): """ Override to return the load path. """ return None @abc.abstractproperty def copy_options(self): """ Add extra copy options, for example: * TIMEFORMAT 'auto' * IGNOREHEADER 1 * TRUNCATECOLUMNS * IGNOREBLANKLINES * DELIMITER '\t' """ return '' @property def prune_table(self): """ Override to set equal to the name of the table which is to be pruned. Intended to be used in conjunction with prune_column and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_column(self): """ Override to set equal to the column of the prune_table which is to be compared Intended to be used in conjunction with prune_table and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_date(self): """ Override to set equal to the date by which prune_column is to be compared Intended to be used in conjunction with prune_table and prune_column i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def table_attributes(self): """ Add extra table attributes, for example: DISTSTYLE KEY DISTKEY (MY_FIELD) SORTKEY (MY_FIELD_2, MY_FIELD_3) """ return '' @property def do_truncate_table(self): """ Return True if table should be truncated before copying new data in. 
""" return False def do_prune(self): """ Return True if prune_table, prune_column, and prune_date are implemented. If only a subset of prune variables are override, an exception is raised to remind the user to implement all or none. Prune (data newer than prune_date deleted) before copying new data in. """ if self.prune_table and self.prune_column and self.prune_date: return True elif self.prune_table or self.prune_column or self.prune_date: raise Exception('override zero or all prune variables') else: return False @property def table_type(self): """ Return table type (i.e. 'temp'). """ return '' @property def queries(self): """ Override to return a list of queries to be executed in order. """ return [] def truncate_table(self, connection): query = "truncate %s" % self.table cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def prune(self, connection): query = "delete from %s where %s >= %s" % (self.prune_table, self.prune_column, self.prune_date) cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. """ if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented " "for %r and columns types not " "specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format( name=name, type=type) for name, type in self.columns ) query = ("CREATE {type} TABLE " "{table} ({coldefs}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_attributes=self.table_attributes) connection.cursor().execute(query) elif len(self.columns[0]) == 3: # if columns is specified as (name, type, encoding) tuples # possible column encodings: https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html coldefs = ','.join( '{name} {type} ENCODE {encoding}'.format( name=name, type=type, encoding=encoding) for name, type, encoding in self.columns ) query = ("CREATE {type} TABLE " "{table} ({coldefs}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_attributes=self.table_attributes) connection.cursor().execute(query) else: raise ValueError("create_table() found no columns for %r" % self.table) def run(self): """ If the target table doesn't exist, self.create_table will be called to attempt to create the table. """ if not (self.table): raise Exception("table need to be specified") path = self.s3_load_path() output = self.output() connection = output.connect() cursor = connection.cursor() self.init_copy(connection) self.copy(cursor, path) self.post_copy(cursor) # update marker table output.touch(connection) connection.commit() # commit and clean up connection.close() def copy(self, cursor, f): """ Defines copying from s3 into redshift. If both key-based and role-based credentials are provided, role-based will be used. 
""" logger.info("Inserting file: %s", f) colnames = '' if self.columns and len(self.columns) > 0: colnames = ",".join([x[0] for x in self.columns]) colnames = '({})'.format(colnames) cursor.execute(""" COPY {table} {colnames} from '{source}' CREDENTIALS '{creds}' {options} ;""".format( table=self.table, colnames=colnames, source=f, creds=self._credentials(), options=self.copy_options) ) def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id) def does_table_exist(self, connection): """ Determine whether the table already exists. """ if '.' in self.table: query = ("select 1 as table_exists " "from information_schema.tables " "where table_schema = lower(%s) and table_name = lower(%s) limit 1") else: query = ("select 1 as table_exists " "from pg_table_def " "where tablename = lower(%s) limit 1") cursor = connection.cursor() try: cursor.execute(query, tuple(self.table.split('.'))) result = cursor.fetchone() return bool(result) finally: cursor.close() def init_copy(self, connection): """ Perform pre-copy sql - such as creating table, truncating, or removing data older than x. """ if not self.does_table_exist(connection): logger.info("Creating table %s", self.table) connection.reset() self.create_table(connection) if self.do_truncate_table: logger.info("Truncating table %s", self.table) self.truncate_table(connection) if self.do_prune(): logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table) self.prune(connection) def post_copy(self, cursor): """ Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc. """ logger.info('Executing post copy queries') for query in self.queries: cursor.execute(query) class S3CopyJSONToTable(S3CopyToTable, _CredentialsMixin): """ Template task for inserting a JSON data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `s3_load_path`, * `jsonpath`, * `copy_json_options`. * You can also override the attributes provided by the CredentialsMixin if they are not supplied by your configuration or environment variables. """ @abc.abstractproperty def jsonpath(self): """ Override the jsonpath schema location for the table. """ return '' @abc.abstractproperty def copy_json_options(self): """ Add extra copy options, for example: * GZIP * LZOP """ return '' def copy(self, cursor, f): """ Defines copying JSON from s3 into redshift. """ logger.info("Inserting file: %s", f) cursor.execute(""" COPY %s from '%s' CREDENTIALS '%s' JSON AS '%s' %s %s ;""" % (self.table, f, self._credentials(), self.jsonpath, self.copy_json_options, self.copy_options)) class RedshiftManifestTask(S3PathTask): """ Generic task to generate a manifest file that can be used in S3CopyToTable in order to copy multiple files from your s3 folder into a redshift table at once. 
For full description on how to use the manifest file see http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html Usage: * requires parameters * path - s3 path to the generated manifest file, including the name of the generated file to be copied into a redshift table * folder_paths - s3 paths to the folders containing files you wish to be copied Output: * generated manifest file """ # should be over ridden to point to a variety # of folders you wish to copy from folder_paths = luigi.Parameter() text_target = True def run(self): entries = [] for folder_path in self.folder_paths: s3 = S3Target(folder_path) client = s3.fs for file_name in client.list(s3.path): entries.append({ 'url': '%s/%s' % (folder_path, file_name), 'mandatory': True }) manifest = {'entries': entries} target = self.output().open('w') dump = json.dumps(manifest) if not self.text_target: dump = dump.encode('utf8') target.write(dump) target.close() class KillOpenRedshiftSessions(luigi.Task): """ An task for killing any open Redshift sessions in a given database. This is necessary to prevent open user sessions with transactions against the table from blocking drop or truncate table commands. Usage: Subclass and override the required `host`, `database`, `user`, and `password` attributes. """ # time in seconds to wait before # reconnecting to Redshift if our session is killed too. # 30 seconds is usually fine; 60 is conservative connection_reset_wait_seconds = luigi.IntParameter(default=60) @abc.abstractproperty def host(self): return None @abc.abstractproperty def database(self): return None @abc.abstractproperty def user(self): return None @abc.abstractproperty def password(self): return None @property def update_id(self): """ This update id will be a unique identifier for this insert on this table. """ return self.task_id def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ # uses class name as a meta-table return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.__class__.__name__, update_id=self.update_id) def run(self): """ Kill any open Redshift sessions for the given database. """ connection = self.output().connect() # kill any sessions other than ours and # internal Redshift sessions (rdsdb) query = ("select pg_terminate_backend(process) " "from STV_SESSIONS " "where db_name=%s " "and user_name != 'rdsdb' " "and process != pg_backend_pid()") cursor = connection.cursor() logger.info('Killing all open Redshift sessions for database: %s', self.database) try: cursor.execute(query, (self.database,)) cursor.close() connection.commit() except psycopg2.DatabaseError as e: if e.message and 'EOF' in e.message: # sometimes this operation kills the current session. # rebuild the connection. Need to pause for 30-60 seconds # before Redshift will allow us back in. 
connection.close() logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds) time.sleep(self.connection_reset_wait_seconds) logger.info('Reconnecting to Redshift') connection = self.output().connect() else: raise try: self.output().touch(connection) connection.commit() finally: connection.close() logger.info('Done killing all open Redshift sessions for database: %s', self.database) class RedshiftQuery(postgres.PostgresQuery): """ Template task for querying an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. """ def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id ) class RedshiftUnloadTask(postgres.PostgresQuery, _CredentialsMixin): """ Template task for running UNLOAD on an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Optionally, override the `autocommit` atribute to run the query in autocommit mode - this is necessary to run VACUUM for example. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. You can also override the attributes provided by the CredentialsMixin if they are not supplied by your configuration or environment variables. """ @property def s3_unload_path(self): """ Override to return the load path. """ return '' @property def unload_options(self): """ Add extra or override default unload options: """ return "DELIMITER '|' ADDQUOTES GZIP ALLOWOVERWRITE PARALLEL ON" @property def unload_query(self): """ Default UNLOAD command """ return ("UNLOAD ( '{query}' ) TO '{s3_unload_path}' " "credentials '{credentials}' " "{unload_options};") def run(self): connection = self.output().connect() cursor = connection.cursor() unload_query = self.unload_query.format( query=self.query().replace("'", r"\'"), s3_unload_path=self.s3_unload_path, unload_options=self.unload_options, credentials=self._credentials()) logger.info('Executing unload query from task: {name}'.format(name=self.__class__)) cursor = connection.cursor() cursor.execute(unload_query) logger.info(cursor.statusmessage) # Update marker table self.output().touch(connection) # commit and close connection connection.commit() connection.close() def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id )
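# --- Editor's note (not part of the original file): a sketch of the
# _CredentialsMixin contract used above. Each credential property is resolved
# from the [redshift] config section, then from an upper-cased environment
# variable (e.g. AWS_ACCOUNT_ID); when both an IAM role pair and a key pair are
# set, _credentials() prefers the role. All values below are hypothetical.
class ExampleRoleBasedCopy(S3CopyToTable):
    host = 'examplecluster.abc123.us-east-1.redshift.amazonaws.com:5439'
    database = 'analytics'
    user = 'loader'
    password = 'hunter2'  # placeholder
    table = 'events'
    # the (name, type, encoding) three-tuple form exercises the ENCODE branch
    # of create_table() above
    columns = [('event_id', 'varchar(64)', 'lzo'),
               ('occurred_at', 'timestamp', 'raw')]
    copy_options = ''

    # with these two set, COPY runs with
    # CREDENTIALS 'aws_iam_role=arn:aws:iam::123456789012:role/redshift-loader'
    aws_account_id = '123456789012'
    aws_arn_role_name = 'redshift-loader'

    def s3_load_path(self):
        return 's3://example-bucket/events/'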
BugsInPy/BugsInPy/temp/projects/luigi/bug-4-fixed/luigi/luigi/contrib/redshift.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-4-buggy/luigi/luigi/contrib/redshift.py
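# --- Editor's note (not part of the original record): a minimal sketch of the
# RedshiftUnloadTask template defined in the file above. All values are
# hypothetical; the DateParameter keeps the task's update_id unique per day so
# the marker table does not suppress repeat runs.
import datetime

class ExampleUnload(RedshiftUnloadTask):
    date = luigi.DateParameter(default=datetime.date.today())
    host = 'examplecluster.abc123.us-east-1.redshift.amazonaws.com:5439'
    database = 'analytics'
    user = 'loader'
    password = 'hunter2'  # placeholder
    table = 'unload_markers'
    aws_access_key_id = 'AKIAEXAMPLE'        # placeholder key-based credentials
    aws_secret_access_key = 'example-secret'

    @property
    def s3_unload_path(self):
        return 's3://example-bucket/exports/%s' % self.date

    def query(self):
        # run() escapes single quotes before splicing this into the UNLOAD command
        return "SELECT * FROM events WHERE occurred_at::date = '%s'" % self.date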
luigi-bug-30
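# --- Editor's note (not part of the original record): the worker module below
# implements "dynamic dependencies": when Task.run() is a generator,
# TaskProcess._run_get_new_deps() gathers each yielded requirement, suspends the
# task until the requirement is complete, then resumes run() with the
# requirement's output targets. A minimal, hypothetical task using that path:
import luigi

class Fetch(luigi.ExternalTask):
    path = luigi.Parameter()

    def output(self):
        return luigi.LocalTarget(self.path)

class Shout(luigi.Task):
    def output(self):
        return luigi.LocalTarget('/tmp/example-output.txt')  # hypothetical path

    def run(self):
        # yield suspends Shout until Fetch completes; the value sent back is
        # Fetch's output target (see getpaths() in _run_get_new_deps below)
        fetched = yield Fetch(path='/tmp/example-input.txt')  # hypothetical path
        with fetched.open('r') as src, self.output().open('w') as dst:
            dst.write(src.read().upper())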
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker communicates with the scheduler and does two things:

1. Sends all tasks that have to be run
2. Gets tasks from the scheduler that should be run

When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.CentralPlannerScheduler` instance.
When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.
"""

import abc
import collections
import getpass
import logging
import multiprocessing  # Note: this seems to have some stability issues: https://github.com/spotify/luigi/pull/438
import os
try:
    import Queue
except ImportError:
    import queue as Queue
import random
import socket
import threading
import time
import traceback
import types

from luigi import six

from luigi import configuration
from luigi import notifications
from luigi.event import Event
from luigi.task_register import load_task
from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, CentralPlannerScheduler
from luigi.target import Target
from luigi.task import Task, flatten, getpaths, TaskClassException, Config
from luigi.parameter import FloatParameter, IntParameter, BoolParameter

try:
    import simplejson as json
except ImportError:
    import json

logger = logging.getLogger('luigi-interface')

# Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex,
# that may not be unlocked in child process, resulting in the process being locked indefinitely.
fork_lock = threading.Lock()


class TaskException(Exception):
    pass


@six.add_metaclass(abc.ABCMeta)
class AbstractTaskProcess(multiprocessing.Process):
    """
    Abstract super-class for tasks run in a separate process.
    """

    def __init__(self, task, worker_id, result_queue, random_seed=False, worker_timeout=0):
        super(AbstractTaskProcess, self).__init__()
        self.task = task
        self.worker_id = worker_id
        self.result_queue = result_queue
        self.random_seed = random_seed
        if task.worker_timeout is not None:
            worker_timeout = task.worker_timeout
        self.timeout_time = time.time() + worker_timeout if worker_timeout else None

    @abc.abstractmethod
    def run(self):
        pass


class ExternalTaskProcess(AbstractTaskProcess):
    """
    Checks whether an external task (i.e. one that hasn't implemented
    `run()`) has completed. This allows us to re-evaluate the completion
    of external dependencies after Luigi has been invoked.
""" def run(self): logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task.task_id) if self.random_seed: # Need to have different random seeds if running in separate processes random.seed((os.getpid(), time.time())) status = FAILED error_message = '' try: self.task.trigger_event(Event.START, self.task) t0 = time.time() status = None try: status = DONE if self.task.complete() else FAILED logger.debug('[pid %s] Task %s has status %s', os.getpid(), self.task, status) finally: self.task.trigger_event( Event.PROCESSING_TIME, self.task, time.time() - t0) error_message = json.dumps(self.task.on_success()) logger.info('[pid %s] Worker %s done %s', os.getpid(), self.worker_id, self.task.task_id) self.task.trigger_event(Event.SUCCESS, self.task) except KeyboardInterrupt: raise except BaseException as ex: status = FAILED logger.exception('[pid %s] Worker %s failed %s', os.getpid(), self.worker_id, self.task) error_message = notifications.wrap_traceback(self.task.on_failure(ex)) self.task.trigger_event(Event.FAILURE, self.task, ex) subject = "Luigi: %s FAILED" % self.task notifications.send_error_email(subject, error_message) finally: logger.debug('Putting result into queue: %s %s %s', self.task.task_id, status, error_message) self.result_queue.put( (self.task.task_id, status, error_message, [], [])) AbstractTaskProcess.register(ExternalTaskProcess) class TaskProcess(AbstractTaskProcess): """ Wrap all task execution in this class. Mainly for convenience since this is run in a separate process. """ def _run_get_new_deps(self): task_gen = self.task.run() if not isinstance(task_gen, types.GeneratorType): return None next_send = None while True: try: if next_send is None: requires = six.next(task_gen) else: requires = task_gen.send(next_send) except StopIteration: return None new_req = flatten(requires) new_deps = [(t.task_module, t.task_family, t.to_str_params()) for t in new_req] if all(t.complete() for t in new_req): next_send = getpaths(requires) else: return new_deps def run(self): logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task.task_id) if self.random_seed: # Need to have different random seeds if running in separate processes random.seed((os.getpid(), time.time())) status = FAILED error_message = '' missing = [] new_deps = [] try: # Verify that all the tasks are fulfilled! 
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()] if missing: deps = 'dependency' if len(missing) == 1 else 'dependencies' raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing))) self.task.trigger_event(Event.START, self.task) t0 = time.time() status = None try: new_deps = self._run_get_new_deps() if new_deps is None: status = RUNNING else: status = SUSPENDED logger.info( '[pid %s] Worker %s new requirements %s', os.getpid(), self.worker_id, self.task.task_id) return finally: if status != SUSPENDED: self.task.trigger_event( Event.PROCESSING_TIME, self.task, time.time() - t0) error_message = json.dumps(self.task.on_success()) logger.info('[pid %s] Worker %s done %s', os.getpid(), self.worker_id, self.task.task_id) self.task.trigger_event(Event.SUCCESS, self.task) status = DONE except KeyboardInterrupt: raise except BaseException as ex: status = FAILED logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task) error_message = notifications.wrap_traceback(self.task.on_failure(ex)) self.task.trigger_event(Event.FAILURE, self.task, ex) subject = "Luigi: %s FAILED" % self.task notifications.send_error_email(subject, error_message) finally: self.result_queue.put( (self.task.task_id, status, error_message, missing, new_deps)) AbstractTaskProcess.register(TaskProcess) class SingleProcessPool(object): """ Dummy process pool for using a single processor. Imitates the api of multiprocessing.Pool using single-processor equivalents. """ def apply_async(self, function, args): return function(*args) class DequeQueue(collections.deque): """ deque wrapper implementing the Queue interface. """ put = collections.deque.append get = collections.deque.pop class AsyncCompletionException(Exception): """ Exception indicating that something went wrong with checking complete. """ def __init__(self, trace): self.trace = trace class TracebackWrapper(object): """ Class to wrap tracebacks so we can know they're not just strings. """ def __init__(self, trace): self.trace = trace def check_complete(task, out_queue): """ Checks if task is complete, puts the result to out_queue. """ logger.debug("Checking if %s is complete", task) try: is_complete = task.complete() except BaseException: is_complete = TracebackWrapper(traceback.format_exc()) out_queue.put((task, is_complete)) class worker(Config): ping_interval = FloatParameter(default=1.0, config_path=dict(section='core', name='retry-delay')) keep_alive = BoolParameter(default=False, config_path=dict(section='core', name='worker-keep-alive')) count_uniques = BoolParameter(default=False, config_path=dict(section='core', name='worker-count-uniques'), description='worker-count-uniques means that we will keep a ' 'worker alive only if it has a unique pending task, as ' 'well as having keep-alive true') wait_interval = IntParameter(default=1, config_path=dict(section='core', name='worker-wait-interval')) max_reschedules = IntParameter(default=1, config_path=dict(section='core', name='worker-max-reschedules')) timeout = IntParameter(default=0, config_path=dict(section='core', name='worker-timeout')) task_limit = IntParameter(default=None, config_path=dict(section='core', name='worker-task-limit')) class KeepAliveThread(threading.Thread): """ Periodically tell the scheduler that the worker still lives. 
""" def __init__(self, scheduler, worker_id, ping_interval): super(KeepAliveThread, self).__init__() self._should_stop = threading.Event() self._scheduler = scheduler self._worker_id = worker_id self._ping_interval = ping_interval def stop(self): self._should_stop.set() def run(self): while True: self._should_stop.wait(self._ping_interval) if self._should_stop.is_set(): logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id) break with fork_lock: try: self._scheduler.ping(worker=self._worker_id) except: # httplib.BadStatusLine: logger.warning('Failed pinging scheduler') class Worker(object): """ Worker object communicates with a scheduler. Simple class that talks to a scheduler and: * tells the scheduler what it has to do + its dependencies * asks for stuff to do (pulls it in a loop and runs it) """ def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs): if scheduler is None: scheduler = CentralPlannerScheduler() self.worker_processes = int(worker_processes) self._worker_info = self._generate_worker_info() if not worker_id: worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info]) self._config = worker(**kwargs) self._id = worker_id self._scheduler = scheduler self._assistant = assistant self.host = socket.gethostname() self._scheduled_tasks = {} self._suspended_tasks = {} self._first_task = None self.add_succeeded = True self.run_succeeded = True self.unfulfilled_counts = collections.defaultdict(int) self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval) self._keep_alive_thread.daemon = True self._keep_alive_thread.start() # Keep info about what tasks are running (could be in other processes) self._task_result_queue = multiprocessing.Queue() self._running_tasks = {} def stop(self): """ Stop the KeepAliveThread associated with this Worker. This should be called whenever you are done with a worker instance to clean up. Warning: this should _only_ be performed if you are sure this worker is not performing any work or will perform any work after this has been called TODO: also kill all currently running tasks TODO (maybe): Worker should be/have a context manager to enforce calling this whenever you stop using a Worker instance """ self._keep_alive_thread.stop() self._keep_alive_thread.join() def _generate_worker_info(self): # Generate as much info as possible about the worker # Some of these calls might not be available on all OS's args = [('salt', '%09d' % random.randrange(0, 999999999)), ('workers', self.worker_processes)] try: args += [('host', socket.gethostname())] except BaseException: pass try: args += [('username', getpass.getuser())] except BaseException: pass try: args += [('pid', os.getpid())] except BaseException: pass try: sudo_user = os.getenv("SUDO_USER") if sudo_user: args.append(('sudo_user', sudo_user)) except BaseException: pass return args def _validate_task(self, task): if not isinstance(task, Task): raise TaskException('Can not schedule non-task %s' % task) if not task.initialized(): # we can't get the repr of it since it's not initialized... raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' 
% task.__class__.__name__) def _log_complete_error(self, task, tb): log_msg = "Will not schedule {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb) logger.warning(log_msg) def _log_unexpected_error(self, task): logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause def _email_complete_error(self, task, formatted_traceback): # like logger.exception but with WARNING level formatted_traceback = notifications.wrap_traceback(formatted_traceback) subject = "Luigi: {task} failed scheduling. Host: {host}".format(task=task, host=self.host) message = "Will not schedule {task} or any dependencies due to error in complete() method:\n{traceback}".format(task=task, traceback=formatted_traceback) notifications.send_error_email(subject, message) def _email_unexpected_error(self, task, formatted_traceback): formatted_traceback = notifications.wrap_traceback(formatted_traceback) subject = "Luigi: Framework error while scheduling {task}. Host: {host}".format(task=task, host=self.host) message = "Luigi framework error:\n{traceback}".format(traceback=formatted_traceback) notifications.send_error_email(subject, message) def add(self, task, multiprocess=False): """ Add a Task for the worker to check and possibly schedule and run. Returns True if task and its dependencies were successfully scheduled or completed before. """ if self._first_task is None and hasattr(task, 'task_id'): self._first_task = task.task_id self.add_succeeded = True if multiprocess: queue = multiprocessing.Manager().Queue() pool = multiprocessing.Pool() else: queue = DequeQueue() pool = SingleProcessPool() self._validate_task(task) pool.apply_async(check_complete, [task, queue]) # we track queue size ourselves because len(queue) won't work for multiprocessing queue_size = 1 try: seen = set([task.task_id]) while queue_size: current = queue.get() queue_size -= 1 item, is_complete = current for next in self._add(item, is_complete): if next.task_id not in seen: self._validate_task(next) seen.add(next.task_id) pool.apply_async(check_complete, [next, queue]) queue_size += 1 except (KeyboardInterrupt, TaskException): raise except Exception as ex: self.add_succeeded = False formatted_traceback = traceback.format_exc() self._log_unexpected_error(task) task.trigger_event(Event.BROKEN_TASK, task, ex) self._email_unexpected_error(task, formatted_traceback) return self.add_succeeded def _add(self, task, is_complete): if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit: logger.warning('Will not schedule %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit) return formatted_traceback = None try: self._check_complete_value(is_complete) except KeyboardInterrupt: raise except AsyncCompletionException as ex: formatted_traceback = ex.trace except BaseException: formatted_traceback = traceback.format_exc() if formatted_traceback is not None: self.add_succeeded = False self._log_complete_error(task, formatted_traceback) task.trigger_event(Event.DEPENDENCY_MISSING, task) self._email_complete_error(task, formatted_traceback) # abort, i.e. 
don't schedule any subtasks of a task with # failing complete()-method since we don't know if the task # is complete and subtasks might not be desirable to run if # they have already ran before return if is_complete: deps = None status = DONE runnable = False task.trigger_event(Event.DEPENDENCY_PRESENT, task) elif task.run == NotImplemented: deps = None status = PENDING runnable = configuration.get_config().getboolean('core', 'retry-external-tasks', False) task.trigger_event(Event.DEPENDENCY_MISSING, task) logger.warning('Task %s is not complete and run() is not implemented. Probably a missing external dependency.', task.task_id) else: deps = task.deps() status = PENDING runnable = True if task.disabled: status = DISABLED if deps: for d in deps: self._validate_dependency(d) task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d) yield d # return additional tasks to add deps = [d.task_id for d in deps] self._scheduled_tasks[task.task_id] = task self._scheduler.add_task(self._id, task.task_id, status=status, deps=deps, runnable=runnable, priority=task.priority, resources=task.process_resources(), params=task.to_str_params(), family=task.task_family, module=task.task_module) logger.info('Scheduled %s (%s)', task.task_id, status) def _validate_dependency(self, dependency): if isinstance(dependency, Target): raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class') elif not isinstance(dependency, Task): raise Exception('requires() must return Task objects') def _check_complete_value(self, is_complete): if is_complete not in (True, False): if isinstance(is_complete, TracebackWrapper): raise AsyncCompletionException(is_complete.trace) raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete) def _add_worker(self): self._worker_info.append(('first_task', self._first_task)) self._scheduler.add_worker(self._id, self._worker_info) def _log_remote_tasks(self, running_tasks, n_pending_tasks, n_unique_pending): logger.info("Done") logger.info("There are no more tasks to run at this time") if running_tasks: for r in running_tasks: logger.info('%s is currently run by worker %s', r['task_id'], r['worker']) elif n_pending_tasks: logger.info("There are %s pending tasks possibly being run by other workers", n_pending_tasks) if n_unique_pending: logger.info("There are %i pending tasks unique to this worker", n_unique_pending) def _get_work(self): logger.debug("Asking scheduler for work...") r = self._scheduler.get_work(worker=self._id, host=self.host, assistant=self._assistant) n_pending_tasks = r['n_pending_tasks'] task_id = r['task_id'] running_tasks = r['running_tasks'] n_unique_pending = r['n_unique_pending'] if task_id is not None and task_id not in self._scheduled_tasks: logger.info('Did not schedule %s, will load it dynamically', task_id) try: # TODO: we should obtain the module name from the server! 
self._scheduled_tasks[task_id] = \ load_task(module=r.get('task_module'), task_name=r['task_family'], params_str=r['task_params']) except TaskClassException as ex: msg = 'Cannot find task for %s' % task_id logger.exception(msg) subject = 'Luigi: %s' % msg error_message = notifications.wrap_traceback(ex) notifications.send_error_email(subject, error_message) self._scheduler.add_task(self._id, task_id, status=FAILED, runnable=False, assistant=self._assistant) task_id = None self.run_succeeded = False return task_id, running_tasks, n_pending_tasks, n_unique_pending def _run_task(self, task_id): task = self._scheduled_tasks[task_id] if task.run == NotImplemented: p = ExternalTaskProcess(task, self._id, self._task_result_queue, random_seed=bool(self.worker_processes > 1), worker_timeout=self._config.timeout) else: p = TaskProcess(task, self._id, self._task_result_queue, random_seed=bool(self.worker_processes > 1), worker_timeout=self._config.timeout) self._running_tasks[task_id] = p if self.worker_processes > 1: with fork_lock: p.start() else: # Run in the same process p.run() def _purge_children(self): """ Find dead children and put a response on the result queue. :return: """ for task_id, p in six.iteritems(self._running_tasks): if not p.is_alive() and p.exitcode: error_msg = 'Worker task %s died unexpectedly with exit code %s' % (task_id, p.exitcode) elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive(): p.terminate() error_msg = 'Worker task %s timed out and was terminated.' % task_id else: continue logger.info(error_msg) self._task_result_queue.put((task_id, FAILED, error_msg, [], [])) def _handle_next_task(self): """ We have to catch three ways a task can be "done": 1. normal execution: the task runs/fails and puts a result back on the queue, 2. new dependencies: the task yielded new deps that were not complete and will be rescheduled and dependencies added, 3. child process dies: we need to catch this separately. """ while True: self._purge_children() # Deal with subprocess failures try: task_id, status, error_message, missing, new_requirements = ( self._task_result_queue.get( timeout=float(self._config.wait_interval))) except Queue.Empty: return task = self._scheduled_tasks[task_id] if not task or task_id not in self._running_tasks: continue # Not a running task. Probably already removed. # Maybe it yielded something? new_deps = [] if new_requirements: new_req = [load_task(module, name, params) for module, name, params in new_requirements] for t in new_req: self.add(t) new_deps = [t.task_id for t in new_req] self._scheduler.add_task(self._id, task_id, status=status, expl=error_message, resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant) if status == RUNNING: continue self._running_tasks.pop(task_id) # re-add task to reschedule missing dependencies if missing: reschedule = True # keep out of infinite loops by not rescheduling too many times for task_id in missing: self.unfulfilled_counts[task_id] += 1 if (self.unfulfilled_counts[task_id] > self._config.max_reschedules): reschedule = False if reschedule: self.add(task) self.run_succeeded &= status in (DONE, SUSPENDED) return def _sleeper(self): # TODO is exponential backoff necessary? 
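# Note: instead of exponential backoff, the loop below just adds 1-5 seconds of random jitter to wait_interval, which staggers scheduler polling when several idle keep-alive workers are sleeping.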
while True: wait_interval = self._config.wait_interval + random.randint(1, 5) logger.debug('Sleeping for %d seconds', wait_interval) time.sleep(wait_interval) yield def _keep_alive(self, n_pending_tasks, n_unique_pending): """ Returns true if a worker should stay alive given. If worker-keep-alive is not set, this will always return false. For an assistant, it will always return the value of worker-keep-alive. Otherwise, it will return true for nonzero n_pending_tasks. If worker-count-uniques is true, it will also require that one of the tasks is unique to this worker. """ if not self._config.keep_alive: return False elif self._assistant: return True else: return n_pending_tasks and (n_unique_pending or not self._config.count_uniques) def run(self): """ Returns True if all scheduled tasks were executed successfully. """ logger.info('Running Worker with %d processes', self.worker_processes) sleeper = self._sleeper() self.run_succeeded = True self._add_worker() while True: while len(self._running_tasks) >= self.worker_processes: logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks)) self._handle_next_task() task_id, running_tasks, n_pending_tasks, n_unique_pending = self._get_work() if task_id is None: self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending) if len(self._running_tasks) == 0: if self._keep_alive(n_pending_tasks, n_unique_pending): six.next(sleeper) continue else: break else: self._handle_next_task() continue # task_id is not None: logger.debug("Pending tasks: %s", n_pending_tasks) self._run_task(task_id) while len(self._running_tasks): logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks)) self._handle_next_task() return self.run_succeeded # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The worker communicates with the scheduler and does two things: 1. Sends all tasks that has to be run 2. Gets tasks from the scheduler that should be run When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.CentralPlannerScheduler` instance. When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance. 
""" import abc import collections import getpass import logging import multiprocessing # Note: this seems to have some stability issues: https://github.com/spotify/luigi/pull/438 import os try: import Queue except ImportError: import queue as Queue import random import socket import threading import time import traceback import types from luigi import six from luigi import configuration from luigi import notifications from luigi.event import Event from luigi.task_register import load_task from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, CentralPlannerScheduler from luigi.target import Target from luigi.task import Task, flatten, getpaths, TaskClassException, Config from luigi.parameter import FloatParameter, IntParameter, BoolParameter try: import simplejson as json except ImportError: import json logger = logging.getLogger('luigi-interface') # Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex, # that may not be unlocked in child process, resulting in the process being locked indefinitely. fork_lock = threading.Lock() class TaskException(Exception): pass @six.add_metaclass(abc.ABCMeta) class AbstractTaskProcess(multiprocessing.Process): """ Abstract super-class for tasks run in a separate process. """ def __init__(self, task, worker_id, result_queue, random_seed=False, worker_timeout=0): super(AbstractTaskProcess, self).__init__() self.task = task self.worker_id = worker_id self.result_queue = result_queue self.random_seed = random_seed if task.worker_timeout is not None: worker_timeout = task.worker_timeout self.timeout_time = time.time() + worker_timeout if worker_timeout else None @abc.abstractmethod def run(self): pass class ExternalTaskProcess(AbstractTaskProcess): """ Checks whether an external task (i.e. one that hasn't implemented `run()`) has completed. This allows us to re-evaluate the completion of external dependencies after Luigi has been invoked. """ def run(self): logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task.task_id) if self.random_seed: # Need to have different random seeds if running in separate processes random.seed((os.getpid(), time.time())) status = FAILED error_message = '' try: self.task.trigger_event(Event.START, self.task) t0 = time.time() status = None try: status = DONE if self.task.complete() else FAILED logger.debug('[pid %s] Task %s has status %s', os.getpid(), self.task, status) finally: self.task.trigger_event( Event.PROCESSING_TIME, self.task, time.time() - t0) error_message = json.dumps(self.task.on_success()) logger.info('[pid %s] Worker %s done %s', os.getpid(), self.worker_id, self.task.task_id) self.task.trigger_event(Event.SUCCESS, self.task) except KeyboardInterrupt: raise except BaseException as ex: status = FAILED logger.exception('[pid %s] Worker %s failed %s', os.getpid(), self.worker_id, self.task) error_message = notifications.wrap_traceback(self.task.on_failure(ex)) self.task.trigger_event(Event.FAILURE, self.task, ex) subject = "Luigi: %s FAILED" % self.task notifications.send_error_email(subject, error_message) finally: logger.debug('Putting result into queue: %s %s %s', self.task.task_id, status, error_message) self.result_queue.put( (self.task.task_id, status, error_message, [], [])) AbstractTaskProcess.register(ExternalTaskProcess) class TaskProcess(AbstractTaskProcess): """ Wrap all task execution in this class. Mainly for convenience since this is run in a separate process. 
""" def _run_get_new_deps(self): task_gen = self.task.run() if not isinstance(task_gen, types.GeneratorType): return None next_send = None while True: try: if next_send is None: requires = six.next(task_gen) else: requires = task_gen.send(next_send) except StopIteration: return None new_req = flatten(requires) new_deps = [(t.task_module, t.task_family, t.to_str_params()) for t in new_req] if all(t.complete() for t in new_req): next_send = getpaths(requires) else: return new_deps def run(self): logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task.task_id) if self.random_seed: # Need to have different random seeds if running in separate processes random.seed((os.getpid(), time.time())) status = FAILED error_message = '' missing = [] new_deps = [] try: # Verify that all the tasks are fulfilled! missing = [dep.task_id for dep in self.task.deps() if not dep.complete()] if missing: deps = 'dependency' if len(missing) == 1 else 'dependencies' raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing))) self.task.trigger_event(Event.START, self.task) t0 = time.time() status = None new_deps = self._run_get_new_deps() if new_deps is None: status = DONE self.task.trigger_event( Event.PROCESSING_TIME, self.task, time.time() - t0) error_message = json.dumps(self.task.on_success()) logger.info('[pid %s] Worker %s done %s', os.getpid(), self.worker_id, self.task.task_id) self.task.trigger_event(Event.SUCCESS, self.task) else: status = SUSPENDED logger.info( '[pid %s] Worker %s new requirements %s', os.getpid(), self.worker_id, self.task.task_id) except KeyboardInterrupt: raise except BaseException as ex: status = FAILED logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task) error_message = notifications.wrap_traceback(self.task.on_failure(ex)) self.task.trigger_event(Event.FAILURE, self.task, ex) subject = "Luigi: %s FAILED" % self.task notifications.send_error_email(subject, error_message) finally: self.result_queue.put( (self.task.task_id, status, error_message, missing, new_deps)) AbstractTaskProcess.register(TaskProcess) class SingleProcessPool(object): """ Dummy process pool for using a single processor. Imitates the api of multiprocessing.Pool using single-processor equivalents. """ def apply_async(self, function, args): return function(*args) class DequeQueue(collections.deque): """ deque wrapper implementing the Queue interface. """ put = collections.deque.append get = collections.deque.pop class AsyncCompletionException(Exception): """ Exception indicating that something went wrong with checking complete. """ def __init__(self, trace): self.trace = trace class TracebackWrapper(object): """ Class to wrap tracebacks so we can know they're not just strings. """ def __init__(self, trace): self.trace = trace def check_complete(task, out_queue): """ Checks if task is complete, puts the result to out_queue. 
""" logger.debug("Checking if %s is complete", task) try: is_complete = task.complete() except BaseException: is_complete = TracebackWrapper(traceback.format_exc()) out_queue.put((task, is_complete)) class worker(Config): ping_interval = FloatParameter(default=1.0, config_path=dict(section='core', name='retry-delay')) keep_alive = BoolParameter(default=False, config_path=dict(section='core', name='worker-keep-alive')) count_uniques = BoolParameter(default=False, config_path=dict(section='core', name='worker-count-uniques'), description='worker-count-uniques means that we will keep a ' 'worker alive only if it has a unique pending task, as ' 'well as having keep-alive true') wait_interval = IntParameter(default=1, config_path=dict(section='core', name='worker-wait-interval')) max_reschedules = IntParameter(default=1, config_path=dict(section='core', name='worker-max-reschedules')) timeout = IntParameter(default=0, config_path=dict(section='core', name='worker-timeout')) task_limit = IntParameter(default=None, config_path=dict(section='core', name='worker-task-limit')) class KeepAliveThread(threading.Thread): """ Periodically tell the scheduler that the worker still lives. """ def __init__(self, scheduler, worker_id, ping_interval): super(KeepAliveThread, self).__init__() self._should_stop = threading.Event() self._scheduler = scheduler self._worker_id = worker_id self._ping_interval = ping_interval def stop(self): self._should_stop.set() def run(self): while True: self._should_stop.wait(self._ping_interval) if self._should_stop.is_set(): logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id) break with fork_lock: try: self._scheduler.ping(worker=self._worker_id) except: # httplib.BadStatusLine: logger.warning('Failed pinging scheduler') class Worker(object): """ Worker object communicates with a scheduler. Simple class that talks to a scheduler and: * tells the scheduler what it has to do + its dependencies * asks for stuff to do (pulls it in a loop and runs it) """ def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs): if scheduler is None: scheduler = CentralPlannerScheduler() self.worker_processes = int(worker_processes) self._worker_info = self._generate_worker_info() if not worker_id: worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info]) self._config = worker(**kwargs) self._id = worker_id self._scheduler = scheduler self._assistant = assistant self.host = socket.gethostname() self._scheduled_tasks = {} self._suspended_tasks = {} self._first_task = None self.add_succeeded = True self.run_succeeded = True self.unfulfilled_counts = collections.defaultdict(int) self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval) self._keep_alive_thread.daemon = True self._keep_alive_thread.start() # Keep info about what tasks are running (could be in other processes) self._task_result_queue = multiprocessing.Queue() self._running_tasks = {} def stop(self): """ Stop the KeepAliveThread associated with this Worker. This should be called whenever you are done with a worker instance to clean up. 
Warning: this should _only_ be performed if you are sure this worker is not performing any work or will perform any work after this has been called TODO: also kill all currently running tasks TODO (maybe): Worker should be/have a context manager to enforce calling this whenever you stop using a Worker instance """ self._keep_alive_thread.stop() self._keep_alive_thread.join() def _generate_worker_info(self): # Generate as much info as possible about the worker # Some of these calls might not be available on all OS's args = [('salt', '%09d' % random.randrange(0, 999999999)), ('workers', self.worker_processes)] try: args += [('host', socket.gethostname())] except BaseException: pass try: args += [('username', getpass.getuser())] except BaseException: pass try: args += [('pid', os.getpid())] except BaseException: pass try: sudo_user = os.getenv("SUDO_USER") if sudo_user: args.append(('sudo_user', sudo_user)) except BaseException: pass return args def _validate_task(self, task): if not isinstance(task, Task): raise TaskException('Can not schedule non-task %s' % task) if not task.initialized(): # we can't get the repr of it since it's not initialized... raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__) def _log_complete_error(self, task, tb): log_msg = "Will not schedule {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb) logger.warning(log_msg) def _log_unexpected_error(self, task): logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause def _email_complete_error(self, task, formatted_traceback): # like logger.exception but with WARNING level formatted_traceback = notifications.wrap_traceback(formatted_traceback) subject = "Luigi: {task} failed scheduling. Host: {host}".format(task=task, host=self.host) message = "Will not schedule {task} or any dependencies due to error in complete() method:\n{traceback}".format(task=task, traceback=formatted_traceback) notifications.send_error_email(subject, message) def _email_unexpected_error(self, task, formatted_traceback): formatted_traceback = notifications.wrap_traceback(formatted_traceback) subject = "Luigi: Framework error while scheduling {task}. Host: {host}".format(task=task, host=self.host) message = "Luigi framework error:\n{traceback}".format(traceback=formatted_traceback) notifications.send_error_email(subject, message) def add(self, task, multiprocess=False): """ Add a Task for the worker to check and possibly schedule and run. Returns True if task and its dependencies were successfully scheduled or completed before. 
""" if self._first_task is None and hasattr(task, 'task_id'): self._first_task = task.task_id self.add_succeeded = True if multiprocess: queue = multiprocessing.Manager().Queue() pool = multiprocessing.Pool() else: queue = DequeQueue() pool = SingleProcessPool() self._validate_task(task) pool.apply_async(check_complete, [task, queue]) # we track queue size ourselves because len(queue) won't work for multiprocessing queue_size = 1 try: seen = set([task.task_id]) while queue_size: current = queue.get() queue_size -= 1 item, is_complete = current for next in self._add(item, is_complete): if next.task_id not in seen: self._validate_task(next) seen.add(next.task_id) pool.apply_async(check_complete, [next, queue]) queue_size += 1 except (KeyboardInterrupt, TaskException): raise except Exception as ex: self.add_succeeded = False formatted_traceback = traceback.format_exc() self._log_unexpected_error(task) task.trigger_event(Event.BROKEN_TASK, task, ex) self._email_unexpected_error(task, formatted_traceback) return self.add_succeeded def _add(self, task, is_complete): if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit: logger.warning('Will not schedule %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit) return formatted_traceback = None try: self._check_complete_value(is_complete) except KeyboardInterrupt: raise except AsyncCompletionException as ex: formatted_traceback = ex.trace except BaseException: formatted_traceback = traceback.format_exc() if formatted_traceback is not None: self.add_succeeded = False self._log_complete_error(task, formatted_traceback) task.trigger_event(Event.DEPENDENCY_MISSING, task) self._email_complete_error(task, formatted_traceback) # abort, i.e. don't schedule any subtasks of a task with # failing complete()-method since we don't know if the task # is complete and subtasks might not be desirable to run if # they have already ran before return if is_complete: deps = None status = DONE runnable = False task.trigger_event(Event.DEPENDENCY_PRESENT, task) elif task.run == NotImplemented: deps = None status = PENDING runnable = configuration.get_config().getboolean('core', 'retry-external-tasks', False) task.trigger_event(Event.DEPENDENCY_MISSING, task) logger.warning('Task %s is not complete and run() is not implemented. Probably a missing external dependency.', task.task_id) else: deps = task.deps() status = PENDING runnable = True if task.disabled: status = DISABLED if deps: for d in deps: self._validate_dependency(d) task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d) yield d # return additional tasks to add deps = [d.task_id for d in deps] self._scheduled_tasks[task.task_id] = task self._scheduler.add_task(self._id, task.task_id, status=status, deps=deps, runnable=runnable, priority=task.priority, resources=task.process_resources(), params=task.to_str_params(), family=task.task_family, module=task.task_module) logger.info('Scheduled %s (%s)', task.task_id, status) def _validate_dependency(self, dependency): if isinstance(dependency, Target): raise Exception('requires() can not return Target objects. 
Wrap it in an ExternalTask class') elif not isinstance(dependency, Task): raise Exception('requires() must return Task objects') def _check_complete_value(self, is_complete): if is_complete not in (True, False): if isinstance(is_complete, TracebackWrapper): raise AsyncCompletionException(is_complete.trace) raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete) def _add_worker(self): self._worker_info.append(('first_task', self._first_task)) self._scheduler.add_worker(self._id, self._worker_info) def _log_remote_tasks(self, running_tasks, n_pending_tasks, n_unique_pending): logger.info("Done") logger.info("There are no more tasks to run at this time") if running_tasks: for r in running_tasks: logger.info('%s is currently run by worker %s', r['task_id'], r['worker']) elif n_pending_tasks: logger.info("There are %s pending tasks possibly being run by other workers", n_pending_tasks) if n_unique_pending: logger.info("There are %i pending tasks unique to this worker", n_unique_pending) def _get_work(self): logger.debug("Asking scheduler for work...") r = self._scheduler.get_work(worker=self._id, host=self.host, assistant=self._assistant) n_pending_tasks = r['n_pending_tasks'] task_id = r['task_id'] running_tasks = r['running_tasks'] n_unique_pending = r['n_unique_pending'] if task_id is not None and task_id not in self._scheduled_tasks: logger.info('Did not schedule %s, will load it dynamically', task_id) try: # TODO: we should obtain the module name from the server! self._scheduled_tasks[task_id] = \ load_task(module=r.get('task_module'), task_name=r['task_family'], params_str=r['task_params']) except TaskClassException as ex: msg = 'Cannot find task for %s' % task_id logger.exception(msg) subject = 'Luigi: %s' % msg error_message = notifications.wrap_traceback(ex) notifications.send_error_email(subject, error_message) self._scheduler.add_task(self._id, task_id, status=FAILED, runnable=False, assistant=self._assistant) task_id = None self.run_succeeded = False return task_id, running_tasks, n_pending_tasks, n_unique_pending def _run_task(self, task_id): task = self._scheduled_tasks[task_id] if task.run == NotImplemented: p = ExternalTaskProcess(task, self._id, self._task_result_queue, random_seed=bool(self.worker_processes > 1), worker_timeout=self._config.timeout) else: p = TaskProcess(task, self._id, self._task_result_queue, random_seed=bool(self.worker_processes > 1), worker_timeout=self._config.timeout) self._running_tasks[task_id] = p if self.worker_processes > 1: with fork_lock: p.start() else: # Run in the same process p.run() def _purge_children(self): """ Find dead children and put a response on the result queue. :return: """ for task_id, p in six.iteritems(self._running_tasks): if not p.is_alive() and p.exitcode: error_msg = 'Worker task %s died unexpectedly with exit code %s' % (task_id, p.exitcode) elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive(): p.terminate() error_msg = 'Worker task %s timed out and was terminated.' % task_id else: continue logger.info(error_msg) self._task_result_queue.put((task_id, FAILED, error_msg, [], [])) def _handle_next_task(self): """ We have to catch three ways a task can be "done": 1. normal execution: the task runs/fails and puts a result back on the queue, 2. new dependencies: the task yielded new deps that were not complete and will be rescheduled and dependencies added, 3. child process dies: we need to catch this separately. 
""" while True: self._purge_children() # Deal with subprocess failures try: task_id, status, error_message, missing, new_requirements = ( self._task_result_queue.get( timeout=float(self._config.wait_interval))) except Queue.Empty: return task = self._scheduled_tasks[task_id] if not task or task_id not in self._running_tasks: continue # Not a running task. Probably already removed. # Maybe it yielded something? new_deps = [] if new_requirements: new_req = [load_task(module, name, params) for module, name, params in new_requirements] for t in new_req: self.add(t) new_deps = [t.task_id for t in new_req] self._scheduler.add_task(self._id, task_id, status=status, expl=error_message, resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant) if status == RUNNING: continue self._running_tasks.pop(task_id) # re-add task to reschedule missing dependencies if missing: reschedule = True # keep out of infinite loops by not rescheduling too many times for task_id in missing: self.unfulfilled_counts[task_id] += 1 if (self.unfulfilled_counts[task_id] > self._config.max_reschedules): reschedule = False if reschedule: self.add(task) self.run_succeeded &= status in (DONE, SUSPENDED) return def _sleeper(self): # TODO is exponential backoff necessary? while True: wait_interval = self._config.wait_interval + random.randint(1, 5) logger.debug('Sleeping for %d seconds', wait_interval) time.sleep(wait_interval) yield def _keep_alive(self, n_pending_tasks, n_unique_pending): """ Returns true if a worker should stay alive given. If worker-keep-alive is not set, this will always return false. For an assistant, it will always return the value of worker-keep-alive. Otherwise, it will return true for nonzero n_pending_tasks. If worker-count-uniques is true, it will also require that one of the tasks is unique to this worker. """ if not self._config.keep_alive: return False elif self._assistant: return True else: return n_pending_tasks and (n_unique_pending or not self._config.count_uniques) def run(self): """ Returns True if all scheduled tasks were executed successfully. """ logger.info('Running Worker with %d processes', self.worker_processes) sleeper = self._sleeper() self.run_succeeded = True self._add_worker() while True: while len(self._running_tasks) >= self.worker_processes: logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks)) self._handle_next_task() task_id, running_tasks, n_pending_tasks, n_unique_pending = self._get_work() if task_id is None: self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending) if len(self._running_tasks) == 0: if self._keep_alive(n_pending_tasks, n_unique_pending): six.next(sleeper) continue else: break else: self._handle_next_task() continue # task_id is not None: logger.debug("Pending tasks: %s", n_pending_tasks) self._run_task(task_id) while len(self._running_tasks): logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks)) self._handle_next_task() return self.run_succeeded
BugsInPy/BugsInPy/temp/projects/luigi/bug-30-fixed/luigi/luigi/worker.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-30-buggy/luigi/luigi/worker.py
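As TaskProcess._run_get_new_deps in the worker module above shows, run() may also be a generator that yields new requirements: the worker reports the task as SUSPENDED, schedules the yielded dependencies, and re-runs the task once they are complete, at which point the yield expression evaluates to getpaths() of the yielded tasks. A minimal sketch of that pattern, with hypothetical Producer/Consumer tasks:

import luigi

class Producer(luigi.Task):
    def output(self):
        return luigi.LocalTarget('producer.txt')

    def run(self):
        with self.output().open('w') as f:
            f.write('payload')

class Consumer(luigi.Task):
    def run(self):
        # Yielding a dependency suspends this task until Producer is
        # complete; on resume, the yield expression evaluates to
        # Producer's output target (via getpaths).
        upstream = yield Producer()
        with upstream.open('r') as f:
            print(f.read())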
luigi-bug-8
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import json import logging import time import os import luigi from luigi import postgres from luigi.contrib import rdbms from luigi.s3 import S3PathTask, S3Target logger = logging.getLogger('luigi-interface') try: import psycopg2 import psycopg2.errorcodes except ImportError: logger.warning("Loading postgres module without psycopg2 installed. " "Will crash at runtime if postgres functionality is used.") class RedshiftTarget(postgres.PostgresTarget): """ Target for a resource in Redshift. Redshift is similar to postgres with a few adjustments required by redshift. """ marker_table = luigi.configuration.get_config().get( 'redshift', 'marker-table', 'table_updates') use_db_timestamps = False class S3CopyToTable(rdbms.CopyToTable): """ Template task for inserting a data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`. """ @abc.abstractmethod def s3_load_path(self): """ Override to return the load path. """ return None @property def aws_access_key_id(self): """ Override to return the key id. """ return None @property def aws_secret_access_key(self): """ Override to return the secret access key. """ return None @property def aws_account_id(self): """ Override to return the account id. """ return None @property def aws_arn_role_name(self): """ Override to return the arn role name. """ return None @property def aws_session_token(self): """ Override to return the session token. """ return None @abc.abstractproperty def copy_options(self): """ Add extra copy options, for example: * TIMEFORMAT 'auto' * IGNOREHEADER 1 * TRUNCATECOLUMNS * IGNOREBLANKLINES * DELIMITER '\t' """ return '' @property def prune_table(self): """ Override to set equal to the name of the table which is to be pruned. Intended to be used in conjunction with prune_column and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_column(self): """ Override to set equal to the column of the prune_table which is to be compared Intended to be used in conjunction with prune_table and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_date(self): """ Override to set equal to the date by which prune_column is to be compared Intended to be used in conjunction with prune_table and prune_column i.e. 
copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def table_attributes(self): """ Add extra table attributes, for example: DISTSTYLE KEY DISTKEY (MY_FIELD) SORTKEY (MY_FIELD_2, MY_FIELD_3) """ return '' @property def do_truncate_table(self): """ Return True if table should be truncated before copying new data in. """ return False def do_prune(self): """ Return True if prune_table, prune_column, and prune_date are implemented. If only a subset of prune variables are override, an exception is raised to remind the user to implement all or none. Prune (data newer than prune_date deleted) before copying new data in. """ if self.prune_table and self.prune_column and self.prune_date: return True elif self.prune_table or self.prune_column or self.prune_date: raise Exception('override zero or all prune variables') else: return False @property def table_type(self): """ Return table type (i.e. 'temp'). """ return '' @property def queries(self): """ Override to return a list of queries to be executed in order. """ return [] def truncate_table(self, connection): query = "truncate %s" % self.table cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def prune(self, connection): query = "delete from %s where %s >= %s" % (self.prune_table, self.prune_column, self.prune_date) cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. """ if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented " "for %r and columns types not " "specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format( name=name, type=type) for name, type in self.columns ) query = ("CREATE {type} TABLE " "{table} ({coldefs}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_attributes=self.table_attributes) connection.cursor().execute(query) else: raise ValueError("create_table() found no columns for %r" % self.table) def run(self): """ If the target table doesn't exist, self.create_table will be called to attempt to create the table. """ if not (self.table): raise Exception("table need to be specified") path = self.s3_load_path() output = self.output() connection = output.connect() cursor = connection.cursor() self.init_copy(connection) self.copy(cursor, path) self.post_copy(cursor) # update marker table output.touch(connection) connection.commit() # commit and clean up connection.close() def copy(self, cursor, f): """ Defines copying from s3 into redshift. If both key-based and role-based credentials are provided, role-based will be used. 
""" # format the credentials string dependent upon which type of credentials were provided if self.aws_account_id and self.aws_arn_role_name: cred_str = 'aws_iam_role=arn:aws:iam::{id}:role/{role}'.format( id=self.aws_account_id, role=self.aws_arn_role_name ) elif self.aws_access_key_id and self.aws_secret_access_key: cred_str = 'aws_access_key_id={key};aws_secret_access_key={secret}{opt}'.format( key=self.aws_access_key_id, secret=self.aws_secret_access_key, opt=';token={}'.format(self.aws_session_token) if self.aws_session_token else '' ) else: raise NotImplementedError("Missing Credentials. " "Override one of the following pairs of auth-args: " "'aws_access_key_id' AND 'aws_secret_access_key' OR " "'aws_account_id' AND 'aws_arn_role_name'") logger.info("Inserting file: %s", f) cursor.execute(""" COPY {table} from '{source}' CREDENTIALS '{creds}' {options} ;""".format( table=self.table, source=f, creds=cred_str, options=self.copy_options) ) def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id) def does_table_exist(self, connection): """ Determine whether the table already exists. """ if '.' in self.table: query = ("select 1 as table_exists " "from information_schema.tables " "where table_schema = %s and table_name = %s limit 1") else: query = ("select 1 as table_exists " "from pg_table_def " "where tablename = %s limit 1") cursor = connection.cursor() try: cursor.execute(query, tuple(self.table.split('.'))) result = cursor.fetchone() return bool(result) finally: cursor.close() def init_copy(self, connection): """ Perform pre-copy sql - such as creating table, truncating, or removing data older than x. """ if not self.does_table_exist(connection): logger.info("Creating table %s", self.table) connection.reset() self.create_table(connection) if self.do_truncate_table: logger.info("Truncating table %s", self.table) self.truncate_table(connection) if self.do_prune(): logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table) self.prune(connection) def post_copy(self, cursor): """ Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc. """ logger.info('Executing post copy queries') for query in self.queries: cursor.execute(query) class S3CopyJSONToTable(S3CopyToTable): """ Template task for inserting a JSON data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`, * `jsonpath`, * `copy_json_options`. """ @abc.abstractproperty def jsonpath(self): """ Override the jsonpath schema location for the table. """ return '' @abc.abstractproperty def copy_json_options(self): """ Add extra copy options, for example: * GZIP * LZOP """ return '' def copy(self, cursor, f): """ Defines copying JSON from s3 into redshift. 
""" # if session token is set, create token string if self.aws_session_token: token = ';token=%s' % self.aws_session_token # otherwise, leave token string empty else: token = '' logger.info("Inserting file: %s", f) cursor.execute(""" COPY %s from '%s' CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s%s' JSON AS '%s' %s %s ;""" % (self.table, f, self.aws_access_key_id, self.aws_secret_access_key, token, self.jsonpath, self.copy_json_options, self.copy_options)) class RedshiftManifestTask(S3PathTask): """ Generic task to generate a manifest file that can be used in S3CopyToTable in order to copy multiple files from your s3 folder into a redshift table at once. For full description on how to use the manifest file see http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html Usage: * requires parameters * path - s3 path to the generated manifest file, including the name of the generated file to be copied into a redshift table * folder_paths - s3 paths to the folders containing files you wish to be copied Output: * generated manifest file """ # should be over ridden to point to a variety # of folders you wish to copy from folder_paths = luigi.Parameter() text_target = True def run(self): entries = [] for folder_path in self.folder_paths: s3 = S3Target(folder_path) client = s3.fs for file_name in client.list(s3.path): entries.append({ 'url': '%s/%s' % (folder_path, file_name), 'mandatory': True }) manifest = {'entries': entries} target = self.output().open('w') dump = json.dumps(manifest) if not self.text_target: dump = dump.encode('utf8') target.write(dump) target.close() class KillOpenRedshiftSessions(luigi.Task): """ An task for killing any open Redshift sessions in a given database. This is necessary to prevent open user sessions with transactions against the table from blocking drop or truncate table commands. Usage: Subclass and override the required `host`, `database`, `user`, and `password` attributes. """ # time in seconds to wait before # reconnecting to Redshift if our session is killed too. # 30 seconds is usually fine; 60 is conservative connection_reset_wait_seconds = luigi.IntParameter(default=60) @abc.abstractproperty def host(self): return None @abc.abstractproperty def database(self): return None @abc.abstractproperty def user(self): return None @abc.abstractproperty def password(self): return None @property def update_id(self): """ This update id will be a unique identifier for this insert on this table. """ return self.task_id def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ # uses class name as a meta-table return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.__class__.__name__, update_id=self.update_id) def run(self): """ Kill any open Redshift sessions for the given database. """ connection = self.output().connect() # kill any sessions other than ours and # internal Redshift sessions (rdsdb) query = ("select pg_terminate_backend(process) " "from STV_SESSIONS " "where db_name=%s " "and user_name != 'rdsdb' " "and process != pg_backend_pid()") cursor = connection.cursor() logger.info('Killing all open Redshift sessions for database: %s', self.database) try: cursor.execute(query, (self.database,)) cursor.close() connection.commit() except psycopg2.DatabaseError as e: if e.message and 'EOF' in e.message: # sometimes this operation kills the current session. # rebuild the connection. 
Need to pause for 30-60 seconds # before Redshift will allow us back in. connection.close() logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds) time.sleep(self.connection_reset_wait_seconds) logger.info('Reconnecting to Redshift') connection = self.output().connect() else: raise try: self.output().touch(connection) connection.commit() finally: connection.close() logger.info('Done killing all open Redshift sessions for database: %s', self.database) class RedshiftQuery(postgres.PostgresQuery): """ Template task for querying an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. """ def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id ) class RedshiftUnloadTask(postgres.PostgresQuery): """ Template task for running UNLOAD on an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. """ @abc.abstractproperty def aws_access_key_id(self): """ Override to return the key id. """ return None @abc.abstractproperty def aws_secret_access_key(self): """ Override to return the secret access key. """ return None @property def s3_unload_path(self): """ Override to return the unload path.
""" return '' @property def unload_options(self): """ Add extra or override default unload options: """ return "DELIMITER '|' ADDQUOTES GZIP ALLOWOVERWRITE PARALLEL ON" @property def unload_query(self): """ Default UNLOAD command """ return ("UNLOAD ( '{query}' ) TO '{s3_unload_path}' " "credentials 'aws_access_key_id={s3_access_key};aws_secret_access_key={s3_security_key}' " "{unload_options};") def run(self): connection = self.output().connect() cursor = connection.cursor() # Retrieve AWS s3 credentials config = luigi.configuration.get_config() if self.aws_access_key_id is None or self.aws_secret_access_key is None: self.aws_access_key_id = config.get('s3', 'aws_access_key_id') self.aws_secret_access_key = config.get('s3', 'aws_secret_access_key') # Optionally we can access env variables to get the keys if self.aws_access_key_id is None or self.aws_access_key_id.strip() == '' \ or self.aws_secret_access_key is None or self.aws_secret_access_key.strip() == '': self.aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID'] self.aws_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY'] unload_query = self.unload_query.format( query=self.query().replace("'", r"\'"), s3_unload_path=self.s3_unload_path, unload_options=self.unload_options, s3_access_key=self.aws_access_key_id, s3_security_key=self.aws_secret_access_key) logger.info('Executing unload query from task: {name}'.format(name=self.__class__)) try: cursor = connection.cursor() cursor.execute(unload_query) logger.info(cursor.statusmessage) except: raise # Update marker table self.output().touch(connection) # commit and close connection connection.commit() connection.close() def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id ) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import json import logging import time import os import luigi from luigi import postgres from luigi.contrib import rdbms from luigi.s3 import S3PathTask, S3Target logger = logging.getLogger('luigi-interface') try: import psycopg2 import psycopg2.errorcodes except ImportError: logger.warning("Loading postgres module without psycopg2 installed. " "Will crash at runtime if postgres functionality is used.") class RedshiftTarget(postgres.PostgresTarget): """ Target for a resource in Redshift. Redshift is similar to postgres with a few adjustments required by redshift. """ marker_table = luigi.configuration.get_config().get( 'redshift', 'marker-table', 'table_updates') use_db_timestamps = False class S3CopyToTable(rdbms.CopyToTable): """ Template task for inserting a data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`. 
""" @abc.abstractmethod def s3_load_path(self): """ Override to return the load path. """ return None @property def aws_access_key_id(self): """ Override to return the key id. """ return None @property def aws_secret_access_key(self): """ Override to return the secret access key. """ return None @property def aws_account_id(self): """ Override to return the account id. """ return None @property def aws_arn_role_name(self): """ Override to return the arn role name. """ return None @property def aws_session_token(self): """ Override to return the session token. """ return None @abc.abstractproperty def copy_options(self): """ Add extra copy options, for example: * TIMEFORMAT 'auto' * IGNOREHEADER 1 * TRUNCATECOLUMNS * IGNOREBLANKLINES * DELIMITER '\t' """ return '' @property def prune_table(self): """ Override to set equal to the name of the table which is to be pruned. Intended to be used in conjunction with prune_column and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_column(self): """ Override to set equal to the column of the prune_table which is to be compared Intended to be used in conjunction with prune_table and prune_date i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def prune_date(self): """ Override to set equal to the date by which prune_column is to be compared Intended to be used in conjunction with prune_table and prune_column i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table """ return None @property def table_attributes(self): """ Add extra table attributes, for example: DISTSTYLE KEY DISTKEY (MY_FIELD) SORTKEY (MY_FIELD_2, MY_FIELD_3) """ return '' @property def do_truncate_table(self): """ Return True if table should be truncated before copying new data in. """ return False def do_prune(self): """ Return True if prune_table, prune_column, and prune_date are implemented. If only a subset of prune variables are override, an exception is raised to remind the user to implement all or none. Prune (data newer than prune_date deleted) before copying new data in. """ if self.prune_table and self.prune_column and self.prune_date: return True elif self.prune_table or self.prune_column or self.prune_date: raise Exception('override zero or all prune variables') else: return False @property def table_type(self): """ Return table type (i.e. 'temp'). """ return '' @property def queries(self): """ Override to return a list of queries to be executed in order. """ return [] def truncate_table(self, connection): query = "truncate %s" % self.table cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def prune(self, connection): query = "delete from %s where %s >= %s" % (self.prune_table, self.prune_column, self.prune_date) cursor = connection.cursor() try: cursor.execute(query) finally: cursor.close() def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. 
""" if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented " "for %r and columns types not " "specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format( name=name, type=type) for name, type in self.columns ) query = ("CREATE {type} TABLE " "{table} ({coldefs}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_attributes=self.table_attributes) connection.cursor().execute(query) else: raise ValueError("create_table() found no columns for %r" % self.table) def run(self): """ If the target table doesn't exist, self.create_table will be called to attempt to create the table. """ if not (self.table): raise Exception("table need to be specified") path = self.s3_load_path() output = self.output() connection = output.connect() cursor = connection.cursor() self.init_copy(connection) self.copy(cursor, path) self.post_copy(cursor) # update marker table output.touch(connection) connection.commit() # commit and clean up connection.close() def copy(self, cursor, f): """ Defines copying from s3 into redshift. If both key-based and role-based credentials are provided, role-based will be used. """ # format the credentials string dependent upon which type of credentials were provided if self.aws_account_id and self.aws_arn_role_name: cred_str = 'aws_iam_role=arn:aws:iam::{id}:role/{role}'.format( id=self.aws_account_id, role=self.aws_arn_role_name ) elif self.aws_access_key_id and self.aws_secret_access_key: cred_str = 'aws_access_key_id={key};aws_secret_access_key={secret}{opt}'.format( key=self.aws_access_key_id, secret=self.aws_secret_access_key, opt=';token={}'.format(self.aws_session_token) if self.aws_session_token else '' ) else: raise NotImplementedError("Missing Credentials. " "Override one of the following pairs of auth-args: " "'aws_access_key_id' AND 'aws_secret_access_key' OR " "'aws_account_id' AND 'aws_arn_role_name'") logger.info("Inserting file: %s", f) cursor.execute(""" COPY {table} from '{source}' CREDENTIALS '{creds}' {options} ;""".format( table=self.table, source=f, creds=cred_str, options=self.copy_options) ) def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id) def does_table_exist(self, connection): """ Determine whether the table already exists. """ if '.' in self.table: query = ("select 1 as table_exists " "from information_schema.tables " "where table_schema = lower(%s) and table_name = lower(%s) limit 1") else: query = ("select 1 as table_exists " "from pg_table_def " "where tablename = lower(%s) limit 1") cursor = connection.cursor() try: cursor.execute(query, tuple(self.table.split('.'))) result = cursor.fetchone() return bool(result) finally: cursor.close() def init_copy(self, connection): """ Perform pre-copy sql - such as creating table, truncating, or removing data older than x. 
""" if not self.does_table_exist(connection): logger.info("Creating table %s", self.table) connection.reset() self.create_table(connection) if self.do_truncate_table: logger.info("Truncating table %s", self.table) self.truncate_table(connection) if self.do_prune(): logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table) self.prune(connection) def post_copy(self, cursor): """ Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc. """ logger.info('Executing post copy queries') for query in self.queries: cursor.execute(query) class S3CopyJSONToTable(S3CopyToTable): """ Template task for inserting a JSON data set into Redshift from s3. Usage: * Subclass and override the required attributes: * `host`, * `database`, * `user`, * `password`, * `table`, * `columns`, * `aws_access_key_id`, * `aws_secret_access_key`, * `s3_load_path`, * `jsonpath`, * `copy_json_options`. """ @abc.abstractproperty def jsonpath(self): """ Override the jsonpath schema location for the table. """ return '' @abc.abstractproperty def copy_json_options(self): """ Add extra copy options, for example: * GZIP * LZOP """ return '' def copy(self, cursor, f): """ Defines copying JSON from s3 into redshift. """ # if session token is set, create token string if self.aws_session_token: token = ';token=%s' % self.aws_session_token # otherwise, leave token string empty else: token = '' logger.info("Inserting file: %s", f) cursor.execute(""" COPY %s from '%s' CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s%s' JSON AS '%s' %s %s ;""" % (self.table, f, self.aws_access_key_id, self.aws_secret_access_key, token, self.jsonpath, self.copy_json_options, self.copy_options)) class RedshiftManifestTask(S3PathTask): """ Generic task to generate a manifest file that can be used in S3CopyToTable in order to copy multiple files from your s3 folder into a redshift table at once. For full description on how to use the manifest file see http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html Usage: * requires parameters * path - s3 path to the generated manifest file, including the name of the generated file to be copied into a redshift table * folder_paths - s3 paths to the folders containing files you wish to be copied Output: * generated manifest file """ # should be over ridden to point to a variety # of folders you wish to copy from folder_paths = luigi.Parameter() text_target = True def run(self): entries = [] for folder_path in self.folder_paths: s3 = S3Target(folder_path) client = s3.fs for file_name in client.list(s3.path): entries.append({ 'url': '%s/%s' % (folder_path, file_name), 'mandatory': True }) manifest = {'entries': entries} target = self.output().open('w') dump = json.dumps(manifest) if not self.text_target: dump = dump.encode('utf8') target.write(dump) target.close() class KillOpenRedshiftSessions(luigi.Task): """ An task for killing any open Redshift sessions in a given database. This is necessary to prevent open user sessions with transactions against the table from blocking drop or truncate table commands. Usage: Subclass and override the required `host`, `database`, `user`, and `password` attributes. """ # time in seconds to wait before # reconnecting to Redshift if our session is killed too. 
# 30 seconds is usually fine; 60 is conservative connection_reset_wait_seconds = luigi.IntParameter(default=60) @abc.abstractproperty def host(self): return None @abc.abstractproperty def database(self): return None @abc.abstractproperty def user(self): return None @abc.abstractproperty def password(self): return None @property def update_id(self): """ This update id will be a unique identifier for this insert on this table. """ return self.task_id def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """ # uses class name as a meta-table return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.__class__.__name__, update_id=self.update_id) def run(self): """ Kill any open Redshift sessions for the given database. """ connection = self.output().connect() # kill any sessions other than ours and # internal Redshift sessions (rdsdb) query = ("select pg_terminate_backend(process) " "from STV_SESSIONS " "where db_name=%s " "and user_name != 'rdsdb' " "and process != pg_backend_pid()") cursor = connection.cursor() logger.info('Killing all open Redshift sessions for database: %s', self.database) try: cursor.execute(query, (self.database,)) cursor.close() connection.commit() except psycopg2.DatabaseError as e: if e.message and 'EOF' in e.message: # sometimes this operation kills the current session. # rebuild the connection. Need to pause for 30-60 seconds # before Redshift will allow us back in. connection.close() logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds) time.sleep(self.connection_reset_wait_seconds) logger.info('Reconnecting to Redshift') connection = self.output().connect() else: raise try: self.output().touch(connection) connection.commit() finally: connection.close() logger.info('Done killing all open Redshift sessions for database: %s', self.database) class RedshiftQuery(postgres.PostgresQuery): """ Template task for querying an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. """ def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id ) class RedshiftUnloadTask(postgres.PostgresQuery): """ Template task for running UNLOAD on an Amazon Redshift database Usage: Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes. Override the `run` method if your use case requires some action with the query result. Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once To customize the query signature as recorded in the database marker table, override the `update_id` property. """ @abc.abstractproperty def aws_access_key_id(self): """ Override to return the key id. """ return None @abc.abstractproperty def aws_secret_access_key(self): """ Override to return the secret access key. 
""" return None @property def s3_unload_path(self): """ Override to return the load path. """ return '' @property def unload_options(self): """ Add extra or override default unload options: """ return "DELIMITER '|' ADDQUOTES GZIP ALLOWOVERWRITE PARALLEL ON" @property def unload_query(self): """ Default UNLOAD command """ return ("UNLOAD ( '{query}' ) TO '{s3_unload_path}' " "credentials 'aws_access_key_id={s3_access_key};aws_secret_access_key={s3_security_key}' " "{unload_options};") def run(self): connection = self.output().connect() cursor = connection.cursor() # Retrieve AWS s3 credentials config = luigi.configuration.get_config() if self.aws_access_key_id is None or self.aws_secret_access_key is None: self.aws_access_key_id = config.get('s3', 'aws_access_key_id') self.aws_secret_access_key = config.get('s3', 'aws_secret_access_key') # Optionally we can access env variables to get the keys if self.aws_access_key_id is None or self.aws_access_key_id.strip() == '' \ or self.aws_secret_access_key is None or self.aws_secret_access_key.strip() == '': self.aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID'] self.aws_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY'] unload_query = self.unload_query.format( query=self.query().replace("'", r"\'"), s3_unload_path=self.s3_unload_path, unload_options=self.unload_options, s3_access_key=self.aws_access_key_id, s3_security_key=self.aws_secret_access_key) logger.info('Executing unload query from task: {name}'.format(name=self.__class__)) try: cursor = connection.cursor() cursor.execute(unload_query) logger.info(cursor.statusmessage) except: raise # Update marker table self.output().touch(connection) # commit and close connection connection.commit() connection.close() def output(self): """ Returns a RedshiftTarget representing the executed query. Normally you don't override this. """ return RedshiftTarget( host=self.host, database=self.database, user=self.user, password=self.password, table=self.table, update_id=self.update_id )
BugsInPy/BugsInPy/temp/projects/luigi/bug-8-fixed/luigi/luigi/contrib/redshift.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-8-buggy/luigi/luigi/contrib/redshift.py
luigi-bug-27
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Parameters are one of the core concepts of Luigi. All Parameters sit on :class:`~luigi.task.Task` classes. See :ref:`Parameter` for more info on how to define parameters. ''' import datetime import warnings try: from ConfigParser import NoOptionError, NoSectionError except ImportError: from configparser import NoOptionError, NoSectionError from luigi import six from luigi import configuration from luigi.deprecate_kwarg import deprecate_kwarg _no_value = object() class ParameterException(Exception): """ Base exception. """ pass class MissingParameterException(ParameterException): """ Exception signifying that there was a missing Parameter. """ pass class UnknownParameterException(ParameterException): """ Exception signifying that an unknown Parameter was supplied. """ pass class DuplicateParameterException(ParameterException): """ Exception signifying that a Parameter was specified multiple times. """ pass class UnknownConfigException(ParameterException): """ Exception signifying that the ``config_path`` for the Parameter could not be found. """ pass class Parameter(object): """ An untyped Parameter Parameters are objects set on the Task class level to make it possible to parameterize tasks. For instance: class MyTask(luigi.Task): foo = luigi.Parameter() This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and ``My(foo='baz')``. The task will then have the ``foo`` attribute set appropriately. There are subclasses of ``Parameter`` that define what type the parameter has. This is not enforced within Python, but are used for command line interaction. The ``config_path`` argument lets you specify a place where the parameter is read from config in case no value is provided. When a task is instantiated, it will first use any argument as the value of the parameter, eg. if you instantiate a = TaskA(x=44) then a.x == 44. If this does not exist, it will use the value of the Parameter object, which is defined on a class level. This will be resolved in this order of falling priority: * Any value provided on the command line on the class level (eg. ``--TaskA-param xyz``) * Any value provided via config (using the ``config_path`` argument) * Any default value set using the ``default`` flag. """ counter = 0 """non-atomically increasing counter used for ordering parameters.""" @deprecate_kwarg('is_boolean', 'is_bool', False) def __init__(self, default=_no_value, is_list=False, is_boolean=False, is_global=False, significant=True, description=None, config_path=None): """ :param default: the default value for this parameter. This should match the type of the Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for ``IntParameter``. By default, no default is stored and the value must be specified at runtime. :param bool is_list: specify ``True`` if the parameter should allow a list of values rather than a single value. Default: ``False``. 
A list has an implicit default value of ``[]``. :param bool is_bool: specify ``True`` if the parameter is a bool value. Default: ``False``. Bool's have an implicit default value of ``False``. :param bool is_global: specify ``True`` if the parameter is global (i.e. used by multiple Tasks). Default: ``False``. DEPRECATED. :param bool significant: specify ``False`` if the parameter should not be treated as part of the unique identifier for a Task. An insignificant Parameter might also be used to specify a password or other sensitive information that should not be made public via the scheduler. Default: ``True``. :param str description: A human-readable string describing the purpose of this Parameter. For command-line invocations, this will be used as the `help` string shown to users. Default: ``None``. :param dict config_path: a dictionary with entries ``section`` and ``name`` specifying a config file entry from which to read the default value for this parameter. DEPRECATED. Default: ``None``. """ # The default default is no default self.__default = default self.__global = _no_value self.is_list = is_list self.is_bool = is_boolean and not is_list # Only BoolParameter should ever use this. TODO(erikbern): should we raise some kind of exception? self.is_global = is_global # It just means that the default value is exposed and you can override it self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks if is_global: warnings.warn( 'is_global is deprecated and will be removed. Please use either ' ' (a) class level config (eg. --MyTask-my-param 42)' ' (b) a separate Config class with global settings on it', DeprecationWarning, stacklevel=2) if is_global and default == _no_value and config_path is None: raise ParameterException('Global parameters need default values') self.description = description if config_path is not None and ('section' not in config_path or 'name' not in config_path): raise ParameterException('config_path must be a hash containing entries for section and name') self.__config = config_path self.counter = Parameter.counter # We need to keep track of this to get the order right (see Task class) Parameter.counter += 1 def _get_value_from_config(self, section, name): """Loads the default from the config. Returns _no_value if it doesn't exist""" conf = configuration.get_config() try: value = conf.get(section, name) except (NoSectionError, NoOptionError): return _no_value if self.is_list: return tuple(self.parse(p.strip()) for p in value.strip().split('\n')) else: return self.parse(value) def _get_value(self, task_name=None, param_name=None): if self.__global != _no_value: return self.__global if task_name and param_name: v = self._get_value_from_config(task_name, param_name) if v != _no_value: return v v = self._get_value_from_config(task_name, param_name.replace('_', '-')) if v != _no_value: warnings.warn( 'The use of the configuration [%s] %s (with dashes) should be avoided. Please use underscores.' % (task_name, param_name), DeprecationWarning, stacklevel=2) return v if self.__config: v = self._get_value_from_config(self.__config['section'], self.__config['name']) if v != _no_value and task_name and param_name: warnings.warn( 'The use of the configuration [%s] %s is deprecated. 
Please use [%s] %s' % (self.__config['section'], self.__config['name'], task_name, param_name), DeprecationWarning, stacklevel=2) if v != _no_value: return v if self.__default != _no_value: return self.__default return _no_value @property def has_value(self): """ ``True`` if a default was specified or if config_path references a valid entry in the conf. Note that "value" refers to the Parameter object itself - it can be either 1. The default value for this parameter 2. A value read from the config 3. A global value Any Task instance can have its own value set that overrides this. """ return self._get_value() != _no_value @property def value(self): """ The value for this Parameter. This refers to any value defined by a default, a config option, or a global value. :raises MissingParameterException: if a value is not set. :return: the parsed value. """ value = self._get_value() if value == _no_value: raise MissingParameterException("No default specified") else: return value def has_task_value(self, task_name, param_name): return self._get_value(task_name, param_name) != _no_value def task_value(self, task_name, param_name): value = self._get_value(task_name, param_name) if value == _no_value: raise MissingParameterException("No default specified") else: return value def set_global(self, value): """ Set the global value of this Parameter. :param value: the new global value. """ self.__global = value def reset_global(self): self.__global = _no_value def parse(self, x): """ Parse an individual value from the input. The default implementation is an identify (it returns ``x``), but subclasses should override this method for specialized parsing. This method is called by :py:meth:`parse_from_input` if ``x`` exists. If this Parameter was specified with ``is_list=True``, then ``parse`` is called once for each item in the list. :param str x: the value to parse. :return: the parsed value. """ return x # default impl def serialize(self, x): # opposite of parse """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ if self.is_list: return [str(v) for v in x] return str(x) def parse_from_input(self, param_name, x): """ Parses the parameter value from input ``x``, handling defaults and is_list. :param param_name: the name of the parameter. This is used for the message in ``MissingParameterException``. :param x: the input value to parse. :raises MissingParameterException: if x is false-y and no default is specified. """ if not x: if self.has_value: return self.value elif self.is_bool: return False elif self.is_list: return [] else: raise MissingParameterException("No value for '%s' (%s) submitted and no default value has been assigned." 
% (param_name, "--" + param_name.replace('_', '-'))) elif self.is_list: return tuple(self.parse(p) for p in x) else: return self.parse(x) def serialize_to_input(self, x): if self.is_list: return tuple(self.serialize(p) for p in x) else: return self.serialize(x) def parser_dest(self, param_name, task_name, glob=False, is_without_section=False): if self.is_global or is_without_section: if glob: return param_name else: return None else: if glob: return task_name + '_' + param_name else: return param_name def add_to_cmdline_parser(self, parser, param_name, task_name, optparse=False, glob=False, is_without_section=False): dest = self.parser_dest(param_name, task_name, glob, is_without_section=is_without_section) if not dest: return flag = '--' + dest.replace('_', '-') description = [] description.append('%s.%s' % (task_name, param_name)) if glob: description.append('for all instances of class %s' % task_name) elif self.description: description.append(self.description) if self.has_value: description.append(" [default: %s]" % (self.value,)) if self.is_list: action = "append" elif self.is_bool: action = "store_true" else: action = "store" if optparse: f = parser.add_option else: f = parser.add_argument f(flag, help=' '.join(description), action=action, dest=dest) def parse_from_args(self, param_name, task_name, args, params): # Note: modifies arguments dest = self.parser_dest(param_name, task_name, glob=False) if dest is not None: value = getattr(args, dest, None) params[param_name] = self.parse_from_input(param_name, value) def set_global_from_args(self, param_name, task_name, args, is_without_section=False): # Note: side effects dest = self.parser_dest(param_name, task_name, glob=True, is_without_section=is_without_section) if dest is not None: value = getattr(args, dest, None) if value: self.set_global(self.parse_from_input(param_name, value)) else: # either False (bools) or None (everything else) self.reset_global() class DateHourParameter(Parameter): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour. A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at 19:00. """ date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T' def parse(self, s): """ Parses a string to a :py:class:`~datetime.datetime` using the format string ``%Y-%m-%dT%H``. """ # TODO(erikbern): we should probably use an internal class for arbitary # time intervals (similar to date_interval). Or what do you think? return datetime.datetime.strptime(s, self.date_format) def serialize(self, dt): """ Converts the datetime to a string usnig the format string ``%Y-%m-%dT%H``. """ if dt is None: return str(dt) return dt.strftime(self.date_format) class DateMinuteParameter(DateHourParameter): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute. A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the minute. For example, ``2013-07-10T19H07`` specifies July 10, 2013 at 19:07. """ date_format = '%Y-%m-%dT%HH%M' # ISO 8601 is to use 'T' and 'H' class DateParameter(Parameter): """ Parameter whose value is a :py:class:`~datetime.date`. A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies July 10, 2013. 
""" def parse(self, s): """Parses a date string formatted as ``YYYY-MM-DD``.""" return datetime.date(*map(int, s.split('-'))) class IntParameter(Parameter): """ Parameter whose value is an ``int``. """ def parse(self, s): """ Parses an ``int`` from the string using ``int()``. """ return int(s) class FloatParameter(Parameter): """ Parameter whose value is a ``float``. """ def parse(self, s): """ Parses a ``float`` from the string using ``float()``. """ return float(s) class BoolParameter(Parameter): """ A Parameter whose value is a ``bool``. """ def __init__(self, *args, **kwargs): """ This constructor passes along args and kwargs to ctor for :py:class:`Parameter` but specifies ``is_bool=True``. """ super(BoolParameter, self).__init__(*args, is_bool=True, **kwargs) def parse(self, s): """ Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. """ return {'true': True, 'false': False}[str(s).lower()] class BooleanParameter(BoolParameter): def __init__(self, *args, **kwargs): warnings.warn( 'BooleanParameter is deprecated, use BoolParameter instead', DeprecationWarning, stacklevel=2 ) super(BooleanParameter, self).__init__(*args, **kwargs) class DateIntervalParameter(Parameter): """ A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`. Date Intervals are specified using the ISO 8601 `Time Interval <http://en.wikipedia.org/wiki/ISO_8601#Time_intervals>`_ notation. """ # Class that maps to/from dates using ISO 8601 standard # Also gives some helpful interval algebra def parse(self, s): """ Parses a `:py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i else: raise ValueError('Invalid date interval - could not be parsed') class TimeDeltaParameter(Parameter): """ Class that maps to timedelta using strings in any of the following forms: * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h") Note: multiple arguments must be supplied in longest to shortest unit order * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported) * ISO 8601 duration ``PnW`` See https://en.wikipedia.org/wiki/ISO_8601#Durations """ def _apply_regex(self, regex, input): from datetime import timedelta import re re_match = re.match(regex, input) if re_match: kwargs = {} has_val = False for k, v in six.iteritems(re_match.groupdict(default="0")): val = int(v) has_val = has_val or val != 0 kwargs[k] = val if has_val: return timedelta(**kwargs) def _parseIso8601(self, input): def field(key): return "(?P<%s>\d+)%s" % (key, key[0].upper()) def optional_field(key): return "(%s)?" % field(key) # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta) regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]])) return self._apply_regex(regex, input) def _parseSimple(self, input): keys = ["weeks", "days", "hours", "minutes", "seconds"] # Give the digits a regex group name from the keys, then look for text with the first letter of the key, # optionally followed by the rest of the word, with final char (the "s") optional regex = "".join(["((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" 
% (k, k[0], k[1:-1], k[-1]) for k in keys]) return self._apply_regex(regex, input) def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. """ result = self._parseIso8601(input) if not result: result = self._parseSimple(input) if result: return result else: raise ParameterException("Invalid time delta - could not parse %s" % input) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Parameters are one of the core concepts of Luigi. All Parameters sit on :class:`~luigi.task.Task` classes. See :ref:`Parameter` for more info on how to define parameters. ''' import datetime import warnings try: from ConfigParser import NoOptionError, NoSectionError except ImportError: from configparser import NoOptionError, NoSectionError from luigi import six from luigi import configuration from luigi.deprecate_kwarg import deprecate_kwarg _no_value = object() class ParameterException(Exception): """ Base exception. """ pass class MissingParameterException(ParameterException): """ Exception signifying that there was a missing Parameter. """ pass class UnknownParameterException(ParameterException): """ Exception signifying that an unknown Parameter was supplied. """ pass class DuplicateParameterException(ParameterException): """ Exception signifying that a Parameter was specified multiple times. """ pass class UnknownConfigException(ParameterException): """ Exception signifying that the ``config_path`` for the Parameter could not be found. """ pass class Parameter(object): """ An untyped Parameter Parameters are objects set on the Task class level to make it possible to parameterize tasks. For instance: class MyTask(luigi.Task): foo = luigi.Parameter() This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and ``My(foo='baz')``. The task will then have the ``foo`` attribute set appropriately. There are subclasses of ``Parameter`` that define what type the parameter has. This is not enforced within Python, but are used for command line interaction. The ``config_path`` argument lets you specify a place where the parameter is read from config in case no value is provided. When a task is instantiated, it will first use any argument as the value of the parameter, eg. if you instantiate a = TaskA(x=44) then a.x == 44. If this does not exist, it will use the value of the Parameter object, which is defined on a class level. This will be resolved in this order of falling priority: * Any value provided on the command line on the class level (eg. ``--TaskA-param xyz``) * Any value provided via config (using the ``config_path`` argument) * Any default value set using the ``default`` flag. 
""" counter = 0 """non-atomically increasing counter used for ordering parameters.""" @deprecate_kwarg('is_boolean', 'is_bool', False) def __init__(self, default=_no_value, is_list=False, is_boolean=False, is_global=False, significant=True, description=None, config_path=None): """ :param default: the default value for this parameter. This should match the type of the Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for ``IntParameter``. By default, no default is stored and the value must be specified at runtime. :param bool is_list: specify ``True`` if the parameter should allow a list of values rather than a single value. Default: ``False``. A list has an implicit default value of ``[]``. :param bool is_bool: specify ``True`` if the parameter is a bool value. Default: ``False``. Bool's have an implicit default value of ``False``. :param bool is_global: specify ``True`` if the parameter is global (i.e. used by multiple Tasks). Default: ``False``. DEPRECATED. :param bool significant: specify ``False`` if the parameter should not be treated as part of the unique identifier for a Task. An insignificant Parameter might also be used to specify a password or other sensitive information that should not be made public via the scheduler. Default: ``True``. :param str description: A human-readable string describing the purpose of this Parameter. For command-line invocations, this will be used as the `help` string shown to users. Default: ``None``. :param dict config_path: a dictionary with entries ``section`` and ``name`` specifying a config file entry from which to read the default value for this parameter. DEPRECATED. Default: ``None``. """ # The default default is no default self.__default = default self.__global = _no_value self.is_list = is_list self.is_bool = is_boolean and not is_list # Only BoolParameter should ever use this. TODO(erikbern): should we raise some kind of exception? self.is_global = is_global # It just means that the default value is exposed and you can override it self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks if is_global: warnings.warn( 'is_global is deprecated and will be removed. Please use either ' ' (a) class level config (eg. --MyTask-my-param 42)' ' (b) a separate Config class with global settings on it', DeprecationWarning, stacklevel=2) if is_global and default == _no_value and config_path is None: raise ParameterException('Global parameters need default values') self.description = description if config_path is not None and ('section' not in config_path or 'name' not in config_path): raise ParameterException('config_path must be a hash containing entries for section and name') self.__config = config_path self.counter = Parameter.counter # We need to keep track of this to get the order right (see Task class) Parameter.counter += 1 def _get_value_from_config(self, section, name): """Loads the default from the config. 
Returns _no_value if it doesn't exist""" conf = configuration.get_config() try: value = conf.get(section, name) except (NoSectionError, NoOptionError): return _no_value if self.is_list: return tuple(self.parse(p.strip()) for p in value.strip().split('\n')) else: return self.parse(value) def _get_value(self, task_name=None, param_name=None): if self.__global != _no_value: return self.__global if task_name and param_name: v = self._get_value_from_config(task_name, param_name) if v != _no_value: return v v = self._get_value_from_config(task_name, param_name.replace('_', '-')) if v != _no_value: warnings.warn( 'The use of the configuration [%s] %s (with dashes) should be avoided. Please use underscores.' % (task_name, param_name), DeprecationWarning, stacklevel=2) return v if self.__config: v = self._get_value_from_config(self.__config['section'], self.__config['name']) if v != _no_value and task_name and param_name: warnings.warn( 'The use of the configuration [%s] %s is deprecated. Please use [%s] %s' % (self.__config['section'], self.__config['name'], task_name, param_name), DeprecationWarning, stacklevel=2) if v != _no_value: return v if self.__default != _no_value: return self.__default return _no_value @property def has_value(self): """ ``True`` if a default was specified or if config_path references a valid entry in the conf. Note that "value" refers to the Parameter object itself - it can be either 1. The default value for this parameter 2. A value read from the config 3. A global value Any Task instance can have its own value set that overrides this. """ return self._get_value() != _no_value @property def value(self): """ The value for this Parameter. This refers to any value defined by a default, a config option, or a global value. :raises MissingParameterException: if a value is not set. :return: the parsed value. """ value = self._get_value() if value == _no_value: raise MissingParameterException("No default specified") else: return value def has_task_value(self, task_name, param_name): return self._get_value(task_name, param_name) != _no_value def task_value(self, task_name, param_name): value = self._get_value(task_name, param_name) if value == _no_value: raise MissingParameterException("No default specified") else: return value def set_global(self, value): """ Set the global value of this Parameter. :param value: the new global value. """ self.__global = value def reset_global(self): self.__global = _no_value def parse(self, x): """ Parse an individual value from the input. The default implementation is an identify (it returns ``x``), but subclasses should override this method for specialized parsing. This method is called by :py:meth:`parse_from_input` if ``x`` exists. If this Parameter was specified with ``is_list=True``, then ``parse`` is called once for each item in the list. :param str x: the value to parse. :return: the parsed value. """ return x # default impl def serialize(self, x): # opposite of parse """ Opposite of :py:meth:`parse`. Converts the value ``x`` to a string. :param x: the value to serialize. """ if self.is_list: return [str(v) for v in x] return str(x) def parse_from_input(self, param_name, x, task_name=None): """ Parses the parameter value from input ``x``, handling defaults and is_list. :param param_name: the name of the parameter. This is used for the message in ``MissingParameterException``. :param x: the input value to parse. :raises MissingParameterException: if x is false-y and no default is specified. 
""" if not x: if self.has_task_value(param_name=param_name, task_name=task_name): return self.task_value(param_name=param_name, task_name=task_name) elif self.is_bool: return False elif self.is_list: return [] else: raise MissingParameterException("No value for '%s' (%s) submitted and no default value has been assigned." % (param_name, "--" + param_name.replace('_', '-'))) elif self.is_list: return tuple(self.parse(p) for p in x) else: return self.parse(x) def serialize_to_input(self, x): if self.is_list: return tuple(self.serialize(p) for p in x) else: return self.serialize(x) def parser_dest(self, param_name, task_name, glob=False, is_without_section=False): if self.is_global or is_without_section: if glob: return param_name else: return None else: if glob: return task_name + '_' + param_name else: return param_name def add_to_cmdline_parser(self, parser, param_name, task_name, optparse=False, glob=False, is_without_section=False): dest = self.parser_dest(param_name, task_name, glob, is_without_section=is_without_section) if not dest: return flag = '--' + dest.replace('_', '-') description = [] description.append('%s.%s' % (task_name, param_name)) if glob: description.append('for all instances of class %s' % task_name) elif self.description: description.append(self.description) if self.has_task_value(param_name=param_name, task_name=task_name): value = self.task_value(param_name=param_name, task_name=task_name) description.append(" [default: %s]" % (value,)) if self.is_list: action = "append" elif self.is_bool: action = "store_true" else: action = "store" if optparse: f = parser.add_option else: f = parser.add_argument f(flag, help=' '.join(description), action=action, dest=dest) def parse_from_args(self, param_name, task_name, args, params): # Note: modifies arguments dest = self.parser_dest(param_name, task_name, glob=False) if dest is not None: value = getattr(args, dest, None) params[param_name] = self.parse_from_input(param_name, value, task_name=task_name) def set_global_from_args(self, param_name, task_name, args, is_without_section=False): # Note: side effects dest = self.parser_dest(param_name, task_name, glob=True, is_without_section=is_without_section) if dest is not None: value = getattr(args, dest, None) if value: self.set_global(self.parse_from_input(param_name, value, task_name=task_name)) else: # either False (bools) or None (everything else) self.reset_global() class DateHourParameter(Parameter): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour. A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at 19:00. """ date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T' def parse(self, s): """ Parses a string to a :py:class:`~datetime.datetime` using the format string ``%Y-%m-%dT%H``. """ # TODO(erikbern): we should probably use an internal class for arbitary # time intervals (similar to date_interval). Or what do you think? return datetime.datetime.strptime(s, self.date_format) def serialize(self, dt): """ Converts the datetime to a string usnig the format string ``%Y-%m-%dT%H``. """ if dt is None: return str(dt) return dt.strftime(self.date_format) class DateMinuteParameter(DateHourParameter): """ Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute. A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted date and time specified to the minute. 
For example, ``2013-07-10T19H07`` specifies July 10, 2013 at 19:07. """ date_format = '%Y-%m-%dT%HH%M' # ISO 8601 is to use 'T' and 'H' class DateParameter(Parameter): """ Parameter whose value is a :py:class:`~datetime.date`. A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies July 10, 2013. """ def parse(self, s): """Parses a date string formatted as ``YYYY-MM-DD``.""" return datetime.date(*map(int, s.split('-'))) class IntParameter(Parameter): """ Parameter whose value is an ``int``. """ def parse(self, s): """ Parses an ``int`` from the string using ``int()``. """ return int(s) class FloatParameter(Parameter): """ Parameter whose value is a ``float``. """ def parse(self, s): """ Parses a ``float`` from the string using ``float()``. """ return float(s) class BoolParameter(Parameter): """ A Parameter whose value is a ``bool``. """ def __init__(self, *args, **kwargs): """ This constructor passes along args and kwargs to ctor for :py:class:`Parameter` but specifies ``is_bool=True``. """ super(BoolParameter, self).__init__(*args, is_bool=True, **kwargs) def parse(self, s): """ Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. """ return {'true': True, 'false': False}[str(s).lower()] class BooleanParameter(BoolParameter): def __init__(self, *args, **kwargs): warnings.warn( 'BooleanParameter is deprecated, use BoolParameter instead', DeprecationWarning, stacklevel=2 ) super(BooleanParameter, self).__init__(*args, **kwargs) class DateIntervalParameter(Parameter): """ A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`. Date Intervals are specified using the ISO 8601 `Time Interval <http://en.wikipedia.org/wiki/ISO_8601#Time_intervals>`_ notation. """ # Class that maps to/from dates using ISO 8601 standard # Also gives some helpful interval algebra def parse(self, s): """ Parses a `:py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i else: raise ValueError('Invalid date interval - could not be parsed') class TimeDeltaParameter(Parameter): """ Class that maps to timedelta using strings in any of the following forms: * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h") Note: multiple arguments must be supplied in longest to shortest unit order * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported) * ISO 8601 duration ``PnW`` See https://en.wikipedia.org/wiki/ISO_8601#Durations """ def _apply_regex(self, regex, input): from datetime import timedelta import re re_match = re.match(regex, input) if re_match: kwargs = {} has_val = False for k, v in six.iteritems(re_match.groupdict(default="0")): val = int(v) has_val = has_val or val != 0 kwargs[k] = val if has_val: return timedelta(**kwargs) def _parseIso8601(self, input): def field(key): return "(?P<%s>\d+)%s" % (key, key[0].upper()) def optional_field(key): return "(%s)?" 
% field(key) # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta) regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]])) return self._apply_regex(regex, input) def _parseSimple(self, input): keys = ["weeks", "days", "hours", "minutes", "seconds"] # Give the digits a regex group name from the keys, then look for text with the first letter of the key, # optionally followed by the rest of the word, with final char (the "s") optional regex = "".join(["((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys]) return self._apply_regex(regex, input) def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. """ result = self._parseIso8601(input) if not result: result = self._parseSimple(input) if result: return result else: raise ParameterException("Invalid time delta - could not parse %s" % input)
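# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the input formats
# TimeDeltaParameter.parse() accepts, per its docstring -- the simple
# "<n> <unit>" form and the two ISO 8601 duration forms.
if __name__ == '__main__':
    _td = TimeDeltaParameter()
    assert _td.parse('1 day') == datetime.timedelta(days=1)
    assert _td.parse('2 w 3 d') == datetime.timedelta(weeks=2, days=3)
    assert _td.parse('P2DT12H') == datetime.timedelta(days=2, hours=12)  # ISO 8601 duration
    assert _td.parse('P1W') == datetime.timedelta(weeks=1)               # ISO 8601 weeks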
BugsInPy/BugsInPy/temp/projects/luigi/bug-27-fixed/luigi/luigi/parameter.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-27-buggy/luigi/luigi/parameter.py
luigi-bug-22
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.

The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.

See :doc:`/central_scheduler` for more info.
"""

import collections

try:
    import cPickle as pickle
except ImportError:
    import pickle
import datetime
import functools
import itertools
import logging
import os
import time

from luigi import six

from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config

logger = logging.getLogger("luigi.server")


class Scheduler(object):
    """
    Abstract base class.

    Note that the methods all take string arguments, not Task objects...
    """
    add_task = NotImplemented
    get_work = NotImplemented
    ping = NotImplemented


UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'

UPSTREAM_SEVERITY_ORDER = (
    '',
    UPSTREAM_RUNNING,
    UPSTREAM_MISSING_INPUT,
    UPSTREAM_FAILED,
    UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
    FAILED: UPSTREAM_FAILED,
    RUNNING: UPSTREAM_RUNNING,
    PENDING: UPSTREAM_MISSING_INPUT,
    DISABLED: UPSTREAM_DISABLED,
}


class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
    # at some point (in particular this would force users to replace all dashes with underscores in the config)
    retry_delay = parameter.FloatParameter(default=900.0)
    remove_delay = parameter.FloatParameter(default=600.0)
    worker_disconnect_delay = parameter.FloatParameter(default=60.0)
    state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')

    # Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
    # These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) visualization_graph = parameter.Parameter(default="svg", config_path=dict(section='scheduler', name='visualization-graph')) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.scheduler_disable_time = None def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): excessive_failures = False if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): excessive_failures = True if self.failures.num_failures() >= self.disable_failures: excessive_failures = True return excessive_failures def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active # seconds since epoch self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference): if worker_reference: self.reference = worker_reference self.last_active = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) def is_trivial_worker(self): """ If it's not an assistant having only tasks that are without requirements """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks()) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def dump(self): state = (self._tasks, self._active_workers) try: with open(self._state_path, 'wb') as fobj: pickle.dump(state, fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? 
def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self._tasks, self._active_workers = state self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker class, this code needs to be updated for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None # not sure why we have SUSPENDED, as it can never be set if new_status == SUSPENDED: new_status = PENDING if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None: return if new_status == FAILED and task.can_disable(): task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def prune(self, task, config, assistants): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, 
config.remove_delay) task.remove = time.time() + config.remove_delay # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). 
""" def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): if task.id not in necessary_tasks and self._state.prune(task, self._config, assistant_ids): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. 
""" task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if not (task.status == RUNNING and status == PENDING): # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. # We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task_id, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = time.time() + self._config.retry_delay if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) if expl is not None: task.expl = expl def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, among_tasks): """ Return worker's rank function for task scheduling. 
:return: """ dependents = collections.defaultdict(int) def not_done(t): task = self._state.get_task(t, default=None) return task is None or task.status != DONE for task in among_tasks: if task.status != DONE: deps = list(filter(not_done, task.deps)) inverse_num_deps = 1.0 / max(len(deps), 1) for dep in deps: dependents[dep] += inverse_num_deps return lambda task: (task.priority, dependents[task.id], -task.time) def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def get_work(self, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. # TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendents self.update(worker_id, {'host': host}) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(): relevant_tasks = worker.get_pending_tasks() used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in self._state.get_active_workers()) tasks = list(relevant_tasks) tasks.sort(key=self._rank(among_tasks=tasks), reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and task.workers) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in 
six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task.id, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task_id, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task_id, successful) elif status == PENDING: self._task_history.task_scheduled(task_id) elif status == RUNNING: self._task_history.task_started(task_id, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. 
We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) visualization_graph = parameter.Parameter(default="svg", config_path=dict(section='scheduler', name='visualization-graph')) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. 
don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.scheduler_disable_time = None def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): excessive_failures = False if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): excessive_failures = True if self.failures.num_failures() >= self.disable_failures: excessive_failures = True return excessive_failures def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=time.time()): self.id = worker_id self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host) self.last_active = last_active # seconds since epoch self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference): if worker_reference: self.reference = worker_reference self.last_active = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) def is_trivial_worker(self): """ If it's not an assistant having only tasks that are without requirements """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks()) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def dump(self): state = (self._tasks, self._active_workers) try: with open(self._state_path, 'wb') as fobj: pickle.dump(state, fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? 
def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self._tasks, self._active_workers = state self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker class, this code needs to be updated for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None # not sure why we have SUSPENDED, as it can never be set if new_status == SUSPENDED: new_status = PENDING if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None: return if new_status == FAILED and task.can_disable(): task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def prune(self, task, config, assistants): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, 
config.remove_delay) task.remove = time.time() + config.remove_delay # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay # Re-enable task after the disable time expires if task.status == DISABLED and task.scheduler_disable_time: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). 
""" def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): if task.id not in necessary_tasks and self._state.prune(task, self._config, assistant_ids): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. 
""" task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if not (task.status == RUNNING and status == PENDING): # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was a acctual change, to prevent noise. # We also check for status == PENDING b/c that's the default value # (so checking for status != task.status woule lie) self._update_task_history(task_id, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = time.time() + self._config.retry_delay if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) if expl is not None: task.expl = expl def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, among_tasks): """ Return worker's rank function for task scheduling. 
:return: """ dependents = collections.defaultdict(int) def not_done(t): task = self._state.get_task(t, default=None) return task is None or task.status != DONE for task in among_tasks: if task.status != DONE: deps = list(filter(not_done, task.deps)) inverse_num_deps = 1.0 / max(len(deps), 1) for dep in deps: dependents[dep] += inverse_num_deps return lambda task: (task.priority, dependents[task.id], -task.time) def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def get_work(self, host=None, assistant=False, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources. # TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendents self.update(worker_id, {'host': host}) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(): relevant_tasks = worker.get_pending_tasks() used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in self._state.get_active_workers()) tasks = list(relevant_tasks) tasks.sort(key=self._rank(among_tasks=tasks), reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and task.workers) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in 
six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task.id, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps) return ret def graph(self, **kwargs): self.prune() serialized = {} for task in self._state.get_active_tasks(): serialized[task.id] = self._serialize_task(task.id) return serialized def _recurse_deps(self, task_id, serialized): if task_id not in serialized: task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # try to infer family and params from task_id try: family, _, param_str = task_id.rstrip(')').partition('(') params = dict(param.split('=') for param in param_str.split(', ')) except BaseException: family, params = '', {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: serialized[task_id] = self._serialize_task(task_id) for dep in task.deps: self._recurse_deps(dep, serialized) def dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._recurse_deps(task_id, serialized) return serialized def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. 
""" self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def inverse_dep_graph(self, task_id, **kwargs): self.prune() serialized = {} if self._state.has_task(task_id): self._traverse_inverse_deps(task_id, serialized) return serialized def _traverse_inverse_deps(self, task_id, serialized): stack = [task_id] serialized[task_id] = self._serialize_task(task_id) while len(stack) > 0: curr_id = stack.pop() for task in self._state.get_active_tasks(): if curr_id in task.deps: serialized[curr_id]["deps"].append(task.id) if task.id not in serialized: serialized[task.id] = self._serialize_task(task.id) serialized[task.id]["deps"] = [] stack.append(task.id) def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task_id, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task_id, successful) elif status == PENDING: self._task_history.task_scheduled(task_id) elif status == RUNNING: self._task_history.task_started(task_id, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
BugsInPy/BugsInPy/temp/projects/luigi/bug-22-fixed/luigi/luigi/scheduler.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-22-buggy/luigi/luigi/scheduler.py
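The scheduler record above centers on three small mechanisms worth isolating. First, task disabling: Failures keeps a deque of failure timestamps and counts only those inside a trailing disable_window; once num_failures() reaches disable_failures (or disable_hard_timeout elapses since the first failure), set_status flips the task to DISABLED. A minimal standalone sketch of that sliding window — class and variable names here are illustrative, not luigi's API:

import collections
import time

class SlidingWindowFailures(object):
    # Count failure events inside a trailing time window, as luigi's Failures does.
    def __init__(self, window):
        self.window = window                  # seconds of history to keep
        self.events = collections.deque()

    def add_failure(self):
        self.events.append(time.time())

    def num_failures(self):
        min_time = time.time() - self.window  # drop events older than the window
        while self.events and self.events[0] < min_time:
            self.events.popleft()
        return len(self.events)

failures = SlidingWindowFailures(window=3600)
for _ in range(3):
    failures.add_failure()
print(failures.num_failures() >= 3)           # True -> the task would go DISABLED

Second, resource checking in get_work: _has_resources admits a task only if every resource it needs still fits in the configured pool on top of what is already in use, with each pool defaulting to 1. A compact restatement of that predicate, using a hypothetical resource name:

def has_resources(needed, used, available):
    # Mirrors _has_resources: needed=None means "no constraints".
    if needed is None:
        return True
    return all(amount + used.get(r, 0) <= available.get(r, 1)
               for r, amount in needed.items())

available = {'db_connections': 2}
print(has_resources({'db_connections': 1}, {'db_connections': 1}, available))  # True: 1+1 <= 2
print(has_resources({'db_connections': 1}, {'db_connections': 2}, available))  # False: pool exhausted

get_work runs this check twice — once against resources actually in use and once against a "greedy" tally that pretends every running task were rescheduled — so a worker holding many low-priority tasks cannot starve higher-priority tasks that share the same resources.

Third, upstream-status aggregation: _upstream_status reduces a task's dependency statuses to the most severe one by taking max() with UPSTREAM_SEVERITY_ORDER.index as the key, so a later position in the tuple wins and the empty string (no upstream problem) is least severe:

UPSTREAM_SEVERITY_ORDER = (
    '', 'UPSTREAM_RUNNING', 'UPSTREAM_MISSING_INPUT',
    'UPSTREAM_FAILED', 'UPSTREAM_DISABLED',
)
dep_statuses = ['UPSTREAM_RUNNING', 'UPSTREAM_FAILED', '']
print(max(dep_statuses, key=UPSTREAM_SEVERITY_ORDER.index))  # UPSTREAM_FAILED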
luigi-bug-29
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Define the centralized register of all :class:`~luigi.task.Task` classes. """ import abc try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict from luigi import six import logging logger = logging.getLogger('luigi-interface') class TaskClassException(Exception): pass class Register(abc.ABCMeta): """ The Metaclass of :py:class:`Task`. Acts as a global registry of Tasks with the following properties: 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the same object. 2. Keep track of all subclasses of :py:class:`Task` and expose them. """ __instance_cache = {} _default_namespace = None _reg = [] AMBIGUOUS_CLASS = object() # Placeholder denoting an error """If this value is returned by :py:meth:`__get_reg` then there is an ambiguous task name (two :py:class:`Task` have the same name). This denotes an error.""" def __new__(metacls, classname, bases, classdict): """ Custom class creation for namespacing. Also register all subclasses. Set the task namespace to whatever the currently declared namespace is. """ if "task_namespace" not in classdict: classdict["task_namespace"] = metacls._default_namespace cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): """ Custom class instantiation utilizing instance cache. If a Task has already been instantiated with the same parameters, the previous instance is returned to reduce number of object instances. """ def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = cls.__instance_cache if h is None: # disabled return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() # unhashable types in parameters if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(cls): """ Clear/Reset the instance cache. """ cls.__instance_cache = {} @classmethod def disable_instance_cache(cls): """ Disables the instance cache. """ cls.__instance_cache = None @property def task_family(cls): """ The task family for the given class. If ``cls.task_namespace is None`` then it's the name of the class. Otherwise, ``<task_namespace>.`` is prefixed to the class name. """ if cls.task_namespace is None: return cls.__name__ else: return "%s.%s" % (cls.task_namespace, cls.__name__) @classmethod def __get_reg(cls): """Return all of the registered classes. 
:return: an ``collections.OrderedDict`` of task_family -> class """ # We have to do this on-demand in case task names have changed later # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822) reg = OrderedDict() for cls in cls._reg: if cls.run == NotImplemented: continue name = cls.task_family if name in reg and reg[name] != cls and \ reg[name] != cls.AMBIGUOUS_CLASS and \ not issubclass(cls, reg[name]): # Registering two different classes - this means we can't instantiate them by name # The only exception is if one class is a subclass of the other. In that case, we # instantiate the most-derived class (this fixes some issues with decorator wrappers). reg[name] = cls.AMBIGUOUS_CLASS else: reg[name] = cls return reg @classmethod def task_names(cls): """ List of task names as strings """ return sorted(cls.__get_reg().keys()) @classmethod def tasks_str(cls): """ Human-readable register contents dump. """ return ','.join(cls.task_names()) @classmethod def get_task_cls(cls, name): """ Returns an unambiguous class or raises an exception. """ task_cls = cls.__get_reg().get(name) if not task_cls: raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str())) if task_cls == cls.AMBIGUOUS_CLASS: raise TaskClassException('Task %r is ambiguous' % name) return task_cls @classmethod def get_all_params(cls): """ Compiles and returns all parameters for all :py:class:`Task`. :return: a generator of tuples (TODO: we should make this more elegant) """ for task_name, task_cls in six.iteritems(cls.__get_reg()): if task_cls == cls.AMBIGUOUS_CLASS: continue for param_name, param_obj in task_cls.get_params(): yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj def load_task(module, task_name, params_str): """ Imports task dynamically given a module and a task name. """ if module is not None: __import__(module) task_cls = Register.get_task_cls(task_name) return task_cls.from_str_params(params_str) # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Define the centralized register of all :class:`~luigi.task.Task` classes. """ import abc try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict from luigi import six import logging logger = logging.getLogger('luigi-interface') class TaskClassException(Exception): pass class Register(abc.ABCMeta): """ The Metaclass of :py:class:`Task`. Acts as a global registry of Tasks with the following properties: 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the same object. 2. Keep track of all subclasses of :py:class:`Task` and expose them. """ __instance_cache = {} _default_namespace = None _reg = [] AMBIGUOUS_CLASS = object() # Placeholder denoting an error """If this value is returned by :py:meth:`__get_reg` then there is an ambiguous task name (two :py:class:`Task` have the same name). 
This denotes an error.""" def __new__(metacls, classname, bases, classdict): """ Custom class creation for namespacing. Also register all subclasses. Set the task namespace to whatever the currently declared namespace is. """ if "task_namespace" not in classdict: classdict["task_namespace"] = metacls._default_namespace cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): """ Custom class instantiation utilizing instance cache. If a Task has already been instantiated with the same parameters, the previous instance is returned to reduce number of object instances. """ def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = cls.__instance_cache if h is None: # disabled return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() # unhashable types in parameters if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(cls): """ Clear/Reset the instance cache. """ cls.__instance_cache = {} @classmethod def disable_instance_cache(cls): """ Disables the instance cache. """ cls.__instance_cache = None @property def task_family(cls): """ The task family for the given class. If ``cls.task_namespace is None`` then it's the name of the class. Otherwise, ``<task_namespace>.`` is prefixed to the class name. """ if cls.task_namespace is None: return cls.__name__ else: return "%s.%s" % (cls.task_namespace, cls.__name__) @classmethod def __get_reg(cls): """Return all of the registered classes. :return: an ``collections.OrderedDict`` of task_family -> class """ # We have to do this on-demand in case task names have changed later # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822) reg = OrderedDict() for cls in cls._reg: name = cls.task_family if name in reg and reg[name] != cls and \ reg[name] != cls.AMBIGUOUS_CLASS and \ not issubclass(cls, reg[name]): # Registering two different classes - this means we can't instantiate them by name # The only exception is if one class is a subclass of the other. In that case, we # instantiate the most-derived class (this fixes some issues with decorator wrappers). reg[name] = cls.AMBIGUOUS_CLASS else: reg[name] = cls return reg @classmethod def task_names(cls): """ List of task names as strings """ return sorted(cls.__get_reg().keys()) @classmethod def tasks_str(cls): """ Human-readable register contents dump. """ return ','.join(cls.task_names()) @classmethod def get_task_cls(cls, name): """ Returns an unambiguous class or raises an exception. """ task_cls = cls.__get_reg().get(name) if not task_cls: raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str())) if task_cls == cls.AMBIGUOUS_CLASS: raise TaskClassException('Task %r is ambiguous' % name) return task_cls @classmethod def get_all_params(cls): """ Compiles and returns all parameters for all :py:class:`Task`. 
        :return: a generator of tuples (TODO: we should make this more elegant)
        """
        for task_name, task_cls in six.iteritems(cls.__get_reg()):
            if task_cls == cls.AMBIGUOUS_CLASS:
                continue
            for param_name, param_obj in task_cls.get_params():
                yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj


def load_task(module, task_name, params_str):
    """
    Imports task dynamically given a module and a task name.
    """
    if module is not None:
        __import__(module)
    task_cls = Register.get_task_cls(task_name)
    return task_cls.from_str_params(params_str)
BugsInPy/BugsInPy/temp/projects/luigi/bug-29-fixed/luigi/luigi/task_register.py,BugsInPy/BugsInPy/temp/projects/luigi/bug-29-buggy/luigi/luigi/task_register.py
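The heart of the task_register record is Register.__call__'s instance cache: instantiation is memoized on (class, parameter values), with a hash() probe so unhashable parameters silently bypass the cache, while __get_reg resolves duplicate family names by preferring the most-derived class and otherwise poisoning the entry with AMBIGUOUS_CLASS. A stripped-down sketch of the caching metaclass (Python 3 syntax for brevity; an illustration, not luigi's Register):

import abc

class MiniRegister(abc.ABCMeta):
    _cache = {}

    def __call__(cls, *args, **kwargs):
        key = (cls, args, tuple(sorted(kwargs.items())))
        try:
            hash(key)
        except TypeError:
            # Unhashable parameter values skip the cache, as in Register.__call__.
            return super(MiniRegister, cls).__call__(*args, **kwargs)
        if key not in MiniRegister._cache:
            MiniRegister._cache[key] = super(MiniRegister, cls).__call__(*args, **kwargs)
        return MiniRegister._cache[key]

class Task(metaclass=MiniRegister):
    def __init__(self, a, b):
        self.a, self.b = a, b

assert Task(1, 2) is Task(1, 2)       # identical parameters -> the cached instance
assert Task(1, 2) is not Task(1, 3)   # different parameters -> a fresh instance

Note that the diff between the record's two copies is a single guard in __get_reg (skipping classes whose run is NotImplemented) — exactly the kind of one-line behavioural change these fixed/buggy pairs encode.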
PySnooper-bug-3
# Copyright 2019 Ram Rachum. # This program is distributed under the MIT license. import sys import os import inspect import types import datetime as datetime_module import re import collections import decorator from . import utils from . import pycompat from .tracer import Tracer def get_write_function(output): if output is None: def write(s): stderr = sys.stderr stderr.write(s) elif isinstance(output, (pycompat.PathLike, str)): def write(s): with open(output_path, 'a') as output_file: output_file.write(s) else: assert isinstance(output, utils.WritableStream) def write(s): output.write(s) return write def snoop(output=None, variables=(), depth=1, prefix=''): ''' Snoop on the function, writing everything it's doing to stderr. This is useful for debugging. When you decorate a function with `@pysnooper.snoop()`, you'll get a log of every line that ran in the function and a play-by-play of every local variable that changed. If stderr is not easily accessible for you, you can redirect the output to a file:: @pysnooper.snoop('/my/log/file.log') See values of some variables that aren't local variables:: @pysnooper.snoop(variables=('foo.bar', 'self.whatever')) Show snoop lines for functions that your function calls:: @pysnooper.snoop(depth=2) Start all snoop lines with a prefix, to grep for them easily:: @pysnooper.snoop(prefix='ZZZ ') ''' write = get_write_function(output) @decorator.decorator def decorate(function, *args, **kwargs): target_code_object = function.__code__ with Tracer(target_code_object=target_code_object, write=write, variables=variables, depth=depth, prefix=prefix): return function(*args, **kwargs) return decorate # Copyright 2019 Ram Rachum. # This program is distributed under the MIT license. import sys import os import inspect import types import datetime as datetime_module import re import collections import decorator from . import utils from . import pycompat from .tracer import Tracer def get_write_function(output): if output is None: def write(s): stderr = sys.stderr stderr.write(s) elif isinstance(output, (pycompat.PathLike, str)): def write(s): with open(output, 'a') as output_file: output_file.write(s) else: assert isinstance(output, utils.WritableStream) def write(s): output.write(s) return write def snoop(output=None, variables=(), depth=1, prefix=''): ''' Snoop on the function, writing everything it's doing to stderr. This is useful for debugging. When you decorate a function with `@pysnooper.snoop()`, you'll get a log of every line that ran in the function and a play-by-play of every local variable that changed. If stderr is not easily accessible for you, you can redirect the output to a file:: @pysnooper.snoop('/my/log/file.log') See values of some variables that aren't local variables:: @pysnooper.snoop(variables=('foo.bar', 'self.whatever')) Show snoop lines for functions that your function calls:: @pysnooper.snoop(depth=2) Start all snoop lines with a prefix, to grep for them easily:: @pysnooper.snoop(prefix='ZZZ ') ''' write = get_write_function(output) @decorator.decorator def decorate(function, *args, **kwargs): target_code_object = function.__code__ with Tracer(target_code_object=target_code_object, write=write, variables=variables, depth=depth, prefix=prefix): return function(*args, **kwargs) return decorate
BugsInPy/BugsInPy/temp/projects/PySnooper/bug-3-fixed/PySnooper/pysnooper/pysnooper.py,BugsInPy/BugsInPy/temp/projects/PySnooper/bug-3-buggy/PySnooper/pysnooper/pysnooper.py
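The two copies of get_write_function above differ by one identifier inside the path branch: one closure opens output_path, a name that is not bound in any enclosing scope, while the other correctly closes over the output argument. Because Python resolves a closure's free variables when the function is called, not when it is defined, the broken variant imports and decorates cleanly and only fails once write() actually runs. A standalone reproduction of that failure mode (not importing pysnooper; the log path is hypothetical):

def get_write_function(output):
    def write(s):
        with open(output_path, 'a') as output_file:   # BUG: should close over `output`
            output_file.write(s)
    return write

write = get_write_function('/tmp/snoop.log')   # no error yet -- the body never ran
try:
    write('hello\n')
except NameError as e:
    print(e)                                   # name 'output_path' is not defined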
PySnooper-bug-1
# Copyright 2019 Ram Rachum and collaborators. # This program is distributed under the MIT license. '''Python 2/3 compatibility''' import abc import os import inspect import sys PY3 = (sys.version_info[0] == 3) if hasattr(abc, 'ABC'): ABC = abc.ABC else: class ABC(object): """Helper class that provides a standard way to create an ABC using inheritance. """ __metaclass__ = abc.ABCMeta __slots__ = () if hasattr(os, 'PathLike'): PathLike = os.PathLike else: class PathLike(ABC): """Abstract base class for implementing the file system path protocol.""" @abc.abstractmethod def __fspath__(self): """Return the file system path representation of the object.""" raise NotImplementedError @classmethod def __subclasshook__(cls, subclass): return ( hasattr(subclass, '__fspath__') or # Make a concession for older `pathlib` versions:g (hasattr(subclass, 'open') and 'path' in subclass.__name__.lower()) ) try: iscoroutinefunction = inspect.iscoroutinefunction except AttributeError: iscoroutinefunction = lambda whatever: False # Lolz if PY3: string_types = (str,) text_type = str else: string_types = (basestring,) text_type = unicode # Copyright 2019 Ram Rachum and collaborators. # This program is distributed under the MIT license. '''Python 2/3 compatibility''' import abc import os import inspect import sys PY3 = (sys.version_info[0] == 3) PY2 = not PY3 if hasattr(abc, 'ABC'): ABC = abc.ABC else: class ABC(object): """Helper class that provides a standard way to create an ABC using inheritance. """ __metaclass__ = abc.ABCMeta __slots__ = () if hasattr(os, 'PathLike'): PathLike = os.PathLike else: class PathLike(ABC): """Abstract base class for implementing the file system path protocol.""" @abc.abstractmethod def __fspath__(self): """Return the file system path representation of the object.""" raise NotImplementedError @classmethod def __subclasshook__(cls, subclass): return ( hasattr(subclass, '__fspath__') or # Make a concession for older `pathlib` versions:g (hasattr(subclass, 'open') and 'path' in subclass.__name__.lower()) ) try: iscoroutinefunction = inspect.iscoroutinefunction except AttributeError: iscoroutinefunction = lambda whatever: False # Lolz if PY3: string_types = (str,) text_type = str else: string_types = (basestring,) text_type = unicode
BugsInPy/BugsInPy/temp/projects/PySnooper/bug-1-fixed/PySnooper/pysnooper/pycompat.py,BugsInPy/BugsInPy/temp/projects/PySnooper/bug-1-buggy/PySnooper/pysnooper/pycompat.py
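The pycompat record's diff is a single constant: one copy defines PY2 = not PY3 and the other does not. Since pycompat exists to be imported by the rest of the package, the missing name would surface as an ImportError in any sibling module that does from .pycompat import PY2. A small demonstration using a throwaway module object to stand in for the incomplete copy (the module name is made up):

import types

buggy_pycompat = types.ModuleType('pycompat_buggy')
exec("import sys\nPY3 = (sys.version_info[0] == 3)", buggy_pycompat.__dict__)

try:
    buggy_pycompat.PY2        # the lookup `from pycompat import PY2` would perform
except AttributeError as e:
    print(e)                  # no attribute 'PY2' -> ImportError at import time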